# -*- coding: utf-8 -*-
from time import time
import logging
from colorlog import ColoredFormatter
class Logger:
"""Log class"""
def __init__(self, *args, log_level=None, **kwargs):
"""Initialisation method"""
super().__init__(*args, **kwargs)
if log_level is not None:
self.log_level = log_level
else:
self.log_level = logging.INFO
self.log = self.setup_logger()
def setup_logger(self):
"""Return a logger with a default ColoredFormatter."""
formatter = ColoredFormatter(
"%(log_color)s[%(asctime)s.%(msecs)03d][%(levelname)-8s] %(name)-20s: %(reset)s%(white)s%(message)s",
datefmt="%d-%b-%y %H:%M:%S",
reset=True,
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
},
)
logger = logging.getLogger(self.__class__.__name__)
logger.setLevel(self.log_level)
if len(logger.handlers) == 0:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def log_time(func):
"""Log time function.
Args:
func (method): wrapped function.
Returns:
?: result of the wrapped function.
"""
def wrapper(*args, **kwargs):
start = time()
result = func(*args, **kwargs)
end = round(time() - start, 5)
args[0].log.debug(f"END {func.__name__:*<70} {end}s")
return result, end
return wrapper
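# A minimal, hypothetical usage sketch (the "Worker" subclass below is an
# illustration only, not part of the original module):
if __name__ == "__main__":
    class Worker(Logger):
        @Logger.log_time
        def compute(self, n):
            return sum(range(n))

    worker = Worker(log_level=logging.DEBUG)
    result, elapsed = worker.compute(1_000_000)
    worker.log.info(f"result={result}, elapsed={elapsed}s")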
|
# -*- coding: utf-8 -*-
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Extend OptionParser with commands.
Example:
>>> parser = OptionParser()
>>> parser.usage = '%prog COMMAND [options] <arg> ...'
>>> parser.add_command('build', 'mymod.build')
>>> parser.add_command('clean', run_clean, add_opt_clean)
>>> run, options, args = parser.parse_command(sys.argv[1:])
>>> return run(options, args[1:])
where mymod.build is a module that defines the two functions run and add_options
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"
from warnings import warn
import sys
import optparse
warn(
"lgc.optparser module is deprecated, use lgc.clcommands instead",
DeprecationWarning,
stacklevel=2,
)
class OptionParser(optparse.OptionParser):
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, **kwargs)
self._commands = {}
self.min_args, self.max_args = 0, 1
def add_command(self, name, mod_or_funcs, help=""):
"""name of the command, name of module or tuple of functions
(run, add_options)
"""
assert isinstance(mod_or_funcs, str) or isinstance(
mod_or_funcs, tuple
), "mod_or_funcs has to be a module name or a tuple of functions"
self._commands[name] = (mod_or_funcs, help)
def print_main_help(self):
optparse.OptionParser.print_help(self)
print("\ncommands:")
for cmdname, (_, help) in self._commands.items():
print("% 10s - %s" % (cmdname, help))
def parse_command(self, args):
if len(args) == 0:
self.print_main_help()
sys.exit(1)
cmd = args[0]
args = args[1:]
if cmd not in self._commands:
if cmd in ("-h", "--help"):
self.print_main_help()
sys.exit(0)
elif self.version is not None and cmd == "--version":
self.print_version()
sys.exit(0)
self.error("unknown command")
self.prog = "%s %s" % (self.prog, cmd)
mod_or_f, help = self._commands[cmd]
# optparse inserts self.description between usage and options help
self.description = help
if isinstance(mod_or_f, str):
exec("from %s import run, add_options" % mod_or_f)
else:
run, add_options = mod_or_f
add_options(self)
(options, args) = self.parse_args(args)
if not (self.min_args <= len(args) <= self.max_args):
self.error("incorrect number of arguments")
return run, options, args
|
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
###### helper functions. Use them when needed #######
def get_title_from_index(index):
return df[df.index == index]["title"].values[0]
def get_index_from_title(title):
return df[df.title == title]["index"].values[0]
##################################################
##Step 1: Read CSV File
df = pd.read_csv("movie_dataset.csv")
#print df.columns
##Step 2: Select Features
features = ['keywords','cast','genres','director']
##Step 3: Create a column in DF which combines all selected features
for feature in features:
df[feature] = df[feature].fillna('')
def combine_features(row):
    try:
        return row['keywords'] + " " + row['cast'] + " " + row["genres"] + " " + row["director"]
    except KeyError:
        print("Error:", row)
df["combined_features"] = df.apply(combine_features,axis=1)
#print "Combined Features:", df["combined_features"].head()
##Step 4: Create count matrix from this new combined column
cv = CountVectorizer()
count_matrix = cv.fit_transform(df["combined_features"])
##Step 5: Compute the Cosine Similarity based on the count_matrix
cosine_sim = cosine_similarity(count_matrix)
movie_user_likes = "Avatar"
## Step 6: Get index of this movie from its title
movie_index = get_index_from_title(movie_user_likes)
similar_movies = list(enumerate(cosine_sim[movie_index]))
## Step 7: Get a list of similar movies in descending order of similarity score
sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True)
## Step 8: Print titles of the first 50 similar movies (the first entry is the query movie itself)
i = 0
for element in sorted_similar_movies:
    print(get_title_from_index(element[0]))
    i = i + 1
    if i > 50:
        break
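# A small, hypothetical helper (not part of the original tutorial) that wraps
# steps 6-8 so recommendations can be requested for any title in the dataset:
def recommend(title, top_n=10):
    idx = get_index_from_title(title)
    scores = list(enumerate(cosine_sim[idx]))
    ranked = sorted(scores, key=lambda x: x[1], reverse=True)
    # skip the first entry, which is the query movie itself (similarity 1.0)
    return [get_title_from_index(i) for i, _ in ranked[1:top_n + 1]]
# Example: print(recommend("Avatar", top_n=5))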
|
from catalyst import dl, metrics
from catalyst.data.loader import BatchPrefetchLoaderWrapper
from fusion.runner import ABaseRunner
import torch
import torch.nn.functional as F
from typing import Mapping, Any
class CatalystRunner(ABaseRunner, dl.Runner):
def predict_batch(
self,
batch: Mapping[str, Any],
**kwargs
) -> Mapping[str, Any]:
"""
Args:
batch:
kwargs:
Return
"""
x, y = batch
return self.model([x_.to(self.device) for x_ in x]), y
# ToDo: _handle_batch -> handle_batch Catalyst v21
def handle_batch(self, batch: Mapping[str, Any]) -> None:
"""
batch:
:return:
"""
x, y = self._unpack_batch(batch)
outputs = self.model(x)
loss = self.criterion(outputs, y)
if isinstance(loss, tuple):
loss, raw_losses = loss
self.batch_metrics.update(raw_losses)
if self.is_train_loader:
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
self.batch_metrics['loss'] = loss.item()
for key in ["loss"]:
self.meters[key].update(self.batch_metrics[key], self.batch_size)
self.batch = {"targets": y}
# ToDo: Add self.batch for callbacks
for source_id, source_z in outputs.z.items():
probs = F.softmax(source_z, dim=-1)
self.batch[f"logits_{source_id}"] = source_z
self.batch[f"probs_{source_id}"] = probs
def get_loaders(self, stage):
return self._loaders
def on_loader_start(self, runner):
super().on_loader_start(runner)
self.meters = {
key: metrics.AdditiveValueMetric(compute_on_call=False)
for key in ["loss"]
}
def on_loader_end(self, runner):
for key in ["loss"]:
self.loader_metrics[key] = self.meters[key].compute()[0]
super().on_loader_end(runner)
def get_loggers(self):
return {
"console": dl.ConsoleLogger(),
"csv": dl.CSVLogger(logdir=self._logdir),
"tensorboard": dl.TensorboardLogger(logdir=self._logdir),
} |
import os
from mongoengine import connect
basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
# set an env var: 'mongodb://magnetu:<password>@<mongo host>:<port>/<db name>'
MONGODB = os.environ.get('MONGODB')
# important also for Flask-Login
SECRET_KEY = os.environ.get('SECRET_KEY')
# Endpoints are not CSRF protected at the moment
WTF_CSRF_ENABLED = False
# SendGrid creds
SG_USER = os.environ.get('SG_USER')
SG_PASSWORD = os.environ.get('SG_PASSWORD')
# Segment.com key
SEGMENT_KEY = os.environ.get('SEGMENT_KEY')
def connect_db():
connect('magnetdb', host=MONGODB)
|
from enum import Enum
from typing import Dict
import attr
class Status(Enum):
Unknown = 0
Errored = 1
Completed = 2
Canceled = 3
@attr.s(auto_attribs=True)
class Result:
headers: Dict[str, str]
body: bytes
metadata: Dict[str, str]
trailers: Dict[str, str]
status: Status
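# A minimal, hypothetical construction example (field values are placeholders):
#   result = Result(
#       headers={"content-type": "application/grpc"},
#       body=b"",
#       metadata={},
#       trailers={},
#       status=Status.Completed,
#   )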
|
from enum import Enum, auto
import logging
import os.path
from airflow.exceptions import AirflowException
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
from bsh_azure.hooks.box_hook import BoxHook
# Quiet chatty libs
logging.getLogger('boxsdk').setLevel(logging.ERROR)
class BoxItemType(Enum):
FILE = auto()
FOLDER = auto()
class BoxSensor(BaseSensorOperator):
# For jinja support, such as:
# {{ task_instance.xcom_pull(task_ids='foo', key='some_name') }}
template_fields = ['box_item_path', 'box_item_type']
@apply_defaults
def __init__(self,
box_item_path,
box_item_type=BoxItemType.FILE,
box_conn_id='box_default',
*args,
**kwargs):
super(BoxSensor, self).__init__(*args, **kwargs)
self.box_item_path = box_item_path
self.box_item_type = box_item_type
self.box_conn_id = box_conn_id
self._box_hook = None
def poke(self, context):
self._box_hook = BoxHook(self.box_conn_id)
box_root = self._box_hook.get_root()
if self.box_item_type == BoxItemType.FOLDER:
self.log.info(f"Checking for folder: {self.box_item_path}")
folders = self.box_item_path.split('/')
return self._box_hook.get_folder(box_root, folders) or False
self.log.info(f"Checking for file: {self.box_item_path}")
path, file = os.path.split(self.box_item_path)
if not file:
raise AirflowException("Invalid path to file")
folders = path.split('/')[-1::-1]
parent = self._box_hook.get_folder(box_root, folders)
return self._box_hook.get_file(parent, file) or False
|
class CaseObservation(object):
def __init__(self, date, observation, country):
self.observation_date = date
self.observation = observation
self.country = country
@classmethod
def get_csv_row_to_dict(cls, row):
return {'observation_date': row[1], 'city': row[2], 'country': row[3], 'confirmed': row[5],
'deaths': row[6], 'recovered': row[7]}
def __str__(self):
return "date : {date} | country : {country} | observation : [ {observation} ]".format(
date=self.observation_date,
country=self.country,
observation=self.observation)
class Observation(object):
def __init__(self):
self.confirmed = 0
self.deaths = 0
self.recovered = 0
def __repr__(self):
return "Observations are confirmed: {confirmed} , deaths : {deaths} , recovered: {recovered}".format(
confirmed=self.confirmed,
deaths=self.deaths,
recovered=self.recovered)
def __str__(self):
return " confirmed: {confirmed} | deaths : {deaths} | recovered: {recovered} ".format(
confirmed=self.confirmed,
deaths=self.deaths,
recovered=self.recovered)
    def __add__(self, other):
        if not isinstance(other, Observation):
            return NotImplemented
        observation = Observation()
        observation.confirmed = self.confirmed + other.confirmed
        observation.deaths = self.deaths + other.deaths
        observation.recovered = self.recovered + other.recovered
        return observation
@staticmethod
def from_dict(row):
def get_default(key):
try:
return int(row[key].split('.')[0])
            except (KeyError, ValueError):
return 0
observation = Observation()
observation.confirmed = get_default('confirmed')
observation.deaths = get_default('deaths')
observation.recovered = get_default('recovered')
return observation
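# A minimal, hypothetical usage sketch combining the classes above: build
# observations from CSV-style dicts and aggregate them with __add__.
if __name__ == "__main__":
    rows = [
        {'confirmed': '10.0', 'deaths': '1.0', 'recovered': '3.0'},
        {'confirmed': '5.0', 'deaths': '0.0', 'recovered': '2.0'},
    ]
    total = Observation()
    for row in rows:
        total = total + Observation.from_dict(row)
    print(total)  # confirmed: 15 | deaths : 1 | recovered: 5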
|
from django.test import TestCase
from django.contrib.auth.models import User
from coursebuilder.models import (
QTYPES,
CourseModule,
ModuleSection,
Extras,
Grade,
Question,
Quiz,
)
def create_course_module():
return CourseModule.objects.create(
course_order=1,
module="cb_module1",
title="First Module",
next_module="empty",
content="<p>Welcome to the first module!<p>",
)
def create_module_section(module):
return ModuleSection.objects.create(
module=module,
title="First Lesson",
order=1,
lesson_order=1,
content="<p>Welcome to the first lesson!<p>",
)
def create_quiz(module):
return Quiz.objects.create(module=module, minpass=75.0, numq=5, show_answers=True)
def create_question(module):
return Question.objects.create(
module=module,
text="Which of the following database does "
"CourseBuilder use in the back-end?",
difficulty=2,
qtype=QTYPES[0],
correct="B",
answerA="MongoDB",
answerB="SQLite",
answerC="PostgreSQL",
)
def create_extras():
return Extras.objects.create(title="Extras", content="<p>Content for Extras<p>")
def create_grade(quiz, participant):
return Grade.objects.create(
quiz=quiz,
score=33.33,
participant=participant,
quiz_name="Quiz for What Are Modules? How to Add Them?",
)
class CourseModuleModelTest(TestCase):
def test_create(self):
module = create_course_module()
self.assertTrue(isinstance(module, CourseModule))
self.assertEqual(module.course_order, 1)
self.assertEqual(module.module, "cb_module1")
self.assertEqual(module.title, "First Module")
self.assertEqual(module.next_module, "empty")
self.assertEqual(module.content, "<p>Welcome to the first module!<p>")
self.assertEqual(str(module), module.title)
def test_get(self):
module = create_course_module()
response = CourseModule.objects.get(module="cb_module1")
        self.assertEqual(response.module, module.module)
def test_delete(self):
create_course_module()
response = CourseModule.objects.filter(module="cb_module1").delete()
self.assertIsNotNone(response)
class ModuleSectionModelTest(TestCase):
def setUp(self):
self.course_module = create_course_module()
def test_create(self):
module_section = create_module_section(self.course_module)
self.assertTrue(isinstance(module_section, ModuleSection))
self.assertEqual(module_section.module, self.course_module)
self.assertEqual(module_section.title, "First Lesson")
self.assertEqual(module_section.order, 1)
self.assertEqual(module_section.lesson_order, 1)
self.assertEqual(module_section.content, "<p>Welcome to the first lesson!<p>")
self.assertEqual(str(module_section), module_section.title)
def test_get(self):
module_section = create_module_section(self.course_module)
response = ModuleSection.objects.get(title="First Lesson")
        self.assertEqual(response.title, module_section.title)
def test_delete(self):
create_module_section(self.course_module)
response = ModuleSection.objects.filter(title="First Lesson").delete()
self.assertIsNotNone(response)
def tearDown(self):
CourseModule.objects.all().delete()
class QuizModelTest(TestCase):
def setUp(self):
self.course_module = create_course_module()
def test_create(self):
quiz = create_quiz(self.course_module)
self.assertTrue(isinstance(quiz, Quiz))
self.assertEqual(quiz.module, self.course_module)
self.assertEqual(quiz.minpass, 75.0)
self.assertEqual(quiz.show_answers, True)
self.assertEqual(str(quiz), "Quiz for " + str(quiz.module))
def test_get(self):
quiz = create_quiz(self.course_module)
response = Quiz.objects.get(module=self.course_module)
        self.assertEqual(response.module, quiz.module)
def test_delete(self):
create_quiz(self.course_module)
response = Quiz.objects.filter(module=self.course_module).delete()
self.assertIsNotNone(response)
def tearDown(self):
CourseModule.objects.all().delete()
class QuestionModelTest(TestCase):
def setUp(self):
self.course_module = create_course_module()
def test_create(self):
question = create_question(self.course_module)
self.assertTrue(isinstance(question, Question))
self.assertEqual(question.module, self.course_module)
self.assertEqual(
question.text,
"Which of the following database does "
"CourseBuilder use in the back-end?",
)
self.assertEqual(question.difficulty, 2)
self.assertEqual(question.qtype, QTYPES[0])
self.assertEqual(question.correct, "B")
self.assertEqual(question.answerA, "MongoDB")
self.assertEqual(question.answerB, "SQLite")
self.assertEqual(question.answerC, "PostgreSQL")
self.assertEqual(question.answerD, None)
self.assertEqual(question.answerE, None)
self.assertEqual(str(question), question.text)
def test_get(self):
question = create_question(self.course_module)
response = Question.objects.get(module=self.course_module)
        self.assertEqual(response.module, question.module)
def test_delete(self):
create_question(self.course_module)
response = Question.objects.filter(module=self.course_module).delete()
self.assertIsNotNone(response)
def tearDown(self):
CourseModule.objects.all().delete()
class ExtrasModelTest(TestCase):
def test_create(self):
extras = create_extras()
self.assertTrue(isinstance(extras, Extras))
self.assertEqual(extras.title, "Extras")
self.assertEqual(extras.content, "<p>Content for Extras<p>")
self.assertEqual(str(extras), extras.title)
def test_get(self):
extras = create_extras()
response = Extras.objects.get(title="Extras")
        self.assertEqual(response.title, extras.title)
def test_delete(self):
create_extras()
response = Extras.objects.filter(title="Extras").delete()
self.assertIsNotNone(response)
class GradeModelTest(TestCase):
def setUp(self):
self.course_module = create_course_module()
self.participant = User.objects.create_user("DevOps_team", "devops", "devops")
self.quiz = create_quiz(self.course_module)
def test_create(self):
grade = create_grade(self.quiz, self.participant)
self.assertTrue(isinstance(grade, Grade))
self.assertEqual(grade.quiz, self.quiz)
self.assertEqual(grade.participant, self.participant)
self.assertEqual(grade.quiz_name, "Quiz for What Are Modules? How to Add Them?")
def test_get(self):
grade = create_grade(self.quiz, self.participant)
response = Grade.objects.get(quiz=self.quiz)
        self.assertEqual(response.score, grade.score)
def test_delete(self):
create_grade(self.quiz, self.participant)
response = Grade.objects.filter(quiz=self.quiz).delete()
self.assertIsNotNone(response)
|
import operator
import re
from http import HTTPStatus
from flask import Flask, jsonify, request
app = Flask(__name__)
variable_re = re.compile(r"[A-Za-z][A-Za-z0-9_]*")
func_map = {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.truediv,
}
variables = {}
@app.route("/calc", methods=['POST'])
def calculate_expression():
body = request.get_json()
left, op, right = body['expression'].split()
left = _get_value(left)
right = _get_value(right)
result = func_map[op](left, right)
result = f"{result:.2f}"
return jsonify(result=result), HTTPStatus.OK
def _get_value(token):
if variable_re.fullmatch(token):
value = variables[token]
else:
value = token
return float(value)
@app.route("/variable/<name>", methods=['PUT'])
def put_variable(name):
body = request.get_json()
if name in variables:
status = HTTPStatus.NO_CONTENT # 204
else:
status = HTTPStatus.CREATED # 201
variables[name] = body['value']
return '', status
@app.route("/variable/<name>", methods=['GET'])
def get_variable(name):
if name not in variables:
return '', HTTPStatus.NOT_FOUND
value = variables[name]
value = f"{float(value):.2f}"
return jsonify(value=value), HTTPStatus.OK
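# A minimal usage sketch (assumes the app is served locally, e.g. via "flask run"
# on port 5000; the variable name and values below are hypothetical):
#   import requests
#   requests.put("http://localhost:5000/variable/x", json={"value": 3})   # 201 Created
#   r = requests.post("http://localhost:5000/calc", json={"expression": "x + 4"})
#   print(r.json())   # {"result": "7.00"}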
|
from __future__ import division
from ea import adult_selection
from ea import parent_selection
from ea import reproduction
from ea import main
from ea import binary_gtype
from ea.ea_globals import *
def war(p1_orig, p2_orig, reployment_factor, loss_factor):
'''Fight a single war and return score for each side'''
p1 = list(p1_orig)
p2 = list(p2_orig)
strength1 = 1
strength2 = 1
reploy1 = 0
reploy2 = 0
score = 0
for i in xrange(len(p1)):
battle = cmp(strength1*(p1[i] + reploy1), strength2*(p2[i] + reploy2))
score += battle
if battle < 0:
if strength1 > loss_factor:
strength1 -= loss_factor
else:
strength1 = 0
if i < len(p1) - 1:
reploy2 += reployment_factor * (p2[i] - p1[i]) / (len(p1) - i - 1)
elif battle > 0:
if strength2 > loss_factor:
strength2 -= loss_factor
else:
strength2 = 0
if i < len(p1) - 1:
reploy1 += reployment_factor * (p1[i] - p2[i]) / (len(p1) - i - 1)
return (cmp(score, 0) + 1, cmp(0, score) + 1)
def fitness_test(population, reployment_factor, loss_factor):
'''Blotto fitness test, a.k.a. the great war'''
warscores = [[0] for i in xrange(len(population))]
for i in xrange(len(population)):
for j in xrange(i+1, len(population)):
score1, score2 = war(population[i].ptype, population[j].ptype, reployment_factor, loss_factor)
warscores[i] += [score1]
warscores[j] += [score2]
tested = []
for i, ind in enumerate(population):
tested += [gpfa_t(gtype=ind.gtype, ptype=ind.ptype, fitness=warscores[i], age=ind.age)]
return tested
def develop(population):
'''Development function for blotto.
Interpret groups of four bits as numbers, then normalize them so they sum to 1.0'''
developed = []
for ind in population:
gtype = ind.gtype
intlist = []
for i in xrange(0, len(gtype), 4):
intlist += [gtype[i] * 8 + gtype[i+1] * 4 + gtype[i+2] * 2 + gtype[i+3]]
floatlist = [x / sum(intlist) for x in intlist]
developed += [gpa_t(gtype=gtype, ptype=floatlist, age=ind.age)]
return developed
def visualize(generation_list):
'''Generate visualizations using matplotlib'''
return None
if __name__=='__main__':
battles = int(raw_input("Input number of battles:\n"))
popsize = int(raw_input("Input population size:\n"))
reployment_factor = float(raw_input("Input reployment factor:\n"))
loss_factor = float(raw_input("Input loss factor:\n"))
adult_selector, litter_size = adult_selection.gen_adult_selection(popsize)
parent_selector = parent_selection.gen_parent_selection(litter_size)
mutate = binary_gtype.gen_mutate()
crossover = binary_gtype.gen_crossover()
reproducer = reproduction.gen_reproduction(mutate, crossover)
    generations = int(raw_input("Input max number of generations:\n"))
fitness_goal = 0
initial = [ga_t(gtype=binary_gtype.generate(4*battles), age=0) for i in xrange(popsize)]
dec_fitness_test = lambda population: fitness_test(population, reployment_factor, loss_factor)
generation_list = main.evolutionary_algorithm(initial, develop, dec_fitness_test, adult_selector, parent_selector, reproducer, generations, fitness_goal)
visualize(generation_list)
|
from keras.preprocessing import image
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.applications.vgg19 import VGG19
from keras.applications.vgg19 import preprocess_input as vgg_preprocess_input
from keras.models import Model, load_model
from keras.layers import Input
import numpy as np
from moviepy.video.io.VideoFileClip import VideoFileClip
from PIL import Image
import getopt
import sys
sample_fold = './SampleVidImg'
class Extractor():
def __init__(self, weights=None, layer='avg_pool'):
"""Either load pretrained from imagenet, or load our saved
weights from our own training."""
self.weights = weights # so we can check elsewhere which model
if weights is None:
# Get model with pretrained weights.
input_tensor = Input(shape=(299, 299, 3))
base_model = InceptionV3(
input_shape=(299, 299, 3),
weights='imagenet',
include_top=True
)
# We'll extract features at the final pool layer.
self.model = Model(
input=base_model.input,
output=base_model.get_layer(layer).output
)
else:
# Load the model first.
self.model = load_model(weights)
# Then remove the top so we get features not predictions.
# From: https://github.com/fchollet/keras/issues/2371
self.model.layers.pop()
self.model.layers.pop() # two pops to get to pool layer
self.model.outputs = [self.model.layers[-1].output]
self.model.output_layers = [self.model.layers[-1]]
self.model.layers[-1].outbound_nodes = []
def extract(self, image_path):
img = image.load_img(image_path, target_size=(299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# Get the prediction.
features = self.model.predict(x)
if self.weights is None:
# For imagenet/default network:
features = features[0]
else:
# For loaded network:
features = features[0]
return features
def extract_PIL(self, img):
img = img.resize((299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# Get the prediction.
features = self.model.predict(x)
if self.weights is None:
# For imagenet/default network:
features = features[0]
else:
# For loaded network:
features = features[0]
return features
class VGGExtractor():
def __init__(self, weights=None):
"""Either load pretrained from imagenet, or load our saved
weights from our own training."""
self.weights = weights # so we can check elsewhere which model
if weights is None:
# Get model with pretrained weights.
input_tensor = Input(shape=(224, 224, 3))
base_model = VGG19(weights='imagenet', include_top=True)
# We'll extract features at the final pool layer.
self.model = Model(
input=base_model.input,
output=base_model.layers[-3].output
)
else:
# Load the model first.
self.model = load_model(weights)
# Then remove the top so we get features not predictions.
# From: https://github.com/fchollet/keras/issues/2371
self.model.layers.pop()
self.model.layers.pop()
            self.model.layers.pop()  # three pops to reach the feature layer
self.model.outputs = [self.model.layers[-1].output]
self.model.output_layers = [self.model.layers[-1]]
self.model.layers[-1].outbound_nodes = []
def extract(self, image_path):
img = image.load_img(image_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = vgg_preprocess_input(x)
# Get the prediction.
features = self.model.predict(x)
if self.weights is None:
# For imagenet/default network:
features = features[0]
else:
# For loaded network:
features = features[0]
return features
    def extract_PIL(self, img):
        img = img.resize((224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = vgg_preprocess_input(x)
# Get the prediction.
features = self.model.predict(x)
if self.weights is None:
# For imagenet/default network:
features = features[0]
else:
# For loaded network:
features = features[0]
return features
if __name__ == '__main__':
options, _ = getopt.getopt(sys.argv[1:], '', ['file='])
for opt in options:
if opt[0] == '--file':
video_path = opt[1]
clip = VideoFileClip(video_path, audio=False)
coun = 0
    max_frame_count = 2000
    start_count = 60 * 20  # 60 fps * 20 sec
imgs_path = []
for clip in clip.iter_frames():
coun += 1
if coun % 60 != 0 or coun < start_count:
continue
        elif len(imgs_path) >= max_frame_count:
break
img = Image.fromarray(clip)
step = 30
sample_size = (150, 200)
margin = 80
for x in range(0 + margin, img.size[0] - sample_size[0] - margin, step):
for y in range(0 + margin, img.size[1] - sample_size[1] - margin, step):
crop = img.crop(
(x, y, x + sample_size[0], y + sample_size[1])
)
crop.save(sample_fold + '/%d_[%d_%d].jpg' % (coun, x, y))
imgs_path.append(sample_fold + '/%d_[%d_%d].jpg' % (coun, x, y))
# img.save(sample_fold + '/%d.jpg' % coun)
# imgs_path.append(sample_fold + '/%d.jpg' % coun)
model = Extractor()
feats = []
for img_p in imgs_path:
feats.append(model.extract(img_p))
feats = np.array(feats)
np.save('InceptionV3_feats.npy', feats)
model = VGGExtractor()
feats = []
for img_p in imgs_path:
feats.append(model.extract(img_p))
feats = np.array(feats)
np.save('VGG_feats.npy', feats)
np.save('img_list.npy', imgs_path)
|
#!/usr/bin/env python3
# Copyright 2021 Chris Farris <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from botocore.exceptions import ClientError
from time import sleep
import boto3
import csv
import datetime as dt
import json
import logging
import os
import re
import sys
HEADER=["InstanceId", "Region", "LaunchTime", "InstanceType", "StateTransitionReason", "DisableApiTermination"]
def main(args, logger):
'''Executes the Primary Logic of the Fast Fix'''
# If they specify a profile use it. Otherwise do the normal thing
if args.profile:
session = boto3.Session(profile_name=args.profile)
else:
session = boto3.Session()
instances = [] # array of rows to pass to DictWriter
tag_keys = ["tag.Name"] # We need to pass all the tag keys to the DictWriter
# Get all the Regions for this account
for region in get_regions(session, args):
ec2_client = session.client("ec2", region_name=region)
instance_list = list_stopped_instances(ec2_client, region, args)
logger.info(f"Found {len(instance_list)} stopped instances to cleanup in {region}")
for i in instance_list:
i['Region'] = region
# parse the annoying way AWS returns tags into a proper dict
tags = parse_tags(i['Tags'])
for key, value in tags.items():
# we need to capture the list of tag_keys for Dictwriter, but we prepend with "tag." to avoid
# overriding an instance key
if f"tag.{key}" not in tag_keys:
tag_keys.append(f"tag.{key}")
i[f"tag.{key}"] = value # now add to the instance dict
# We now need to get the disableApiTermination attribute which wasn't provided by our describe-instances
response = ec2_client.describe_instance_attribute(Attribute='disableApiTermination', InstanceId=i['InstanceId'])
i['DisableApiTermination'] = response['DisableApiTermination']['Value']
instances.append(i)
# Now write the final CSV file
with open(args.outfile, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=HEADER + tag_keys, extrasaction='ignore')
writer.writeheader()
for i in instances:
writer.writerow(i)
exit(0)
def list_stopped_instances(ec2_client, region, args):
output = []
response = ec2_client.describe_instances(
Filters=[{'Name': 'instance-state-name', 'Values': ['stopped']}],
MaxResults=1000
)
threshold_time = dt.datetime.today() - dt.timedelta(days=int(args.older_than_days))
logger.info(f"Looking for Stopped Instances older than {threshold_time}")
for r in response['Reservations']:
for i in r['Instances']:
# print(json.dumps(i, indent=2, default=str))
# We only want to process Instances that have been stopped longer than --older-than-days
# The only way to know when an instance was stopped is to parse the StateTransitionReason
if i['StateTransitionReason'] == "":
logger.error(f"Instance {i['InstanceId']} in state {i['State']['Name']} has no StateTransitionReason")
continue # nothing to do here, move along, move along
# Need to extract a date from string that looks like: "User initiated (2021-01-11 22:52:15 GMT)"
# Note: so far the sample set of this string is small, more logic may be needed here
try:
                stopped_date_str = re.search(r'\((.+?)\)', i['StateTransitionReason']).group(1)
# print(stopped_date_str)
stopped_date = dt.datetime.strptime(stopped_date_str, '%Y-%m-%d %H:%M:%S %Z')
# print(stopped_date)
# If the stopped date is older than our threshold, return the instance info
if stopped_date < threshold_time:
logger.debug(f"Instance {i['InstanceId']} is {i['State']['Name']} for {i['StateTransitionReason']}, which is older that {threshold_time}")
output.append(i)
except AttributeError:
pass
return(output)
def parse_tags(tagset):
output = {}
for t in tagset:
output[t['Key']] = t['Value']
return(output)
def get_regions(session, args):
    '''Return a list of regions with us-east-1 first. If --region was specified, return a list with just that region'''
    # If we specified a region on the CLI, return a list of just that
if args.region:
return([args.region])
# otherwise return all the regions, us-east-1 first
ec2 = session.client('ec2', region_name="us-east-1")
response = ec2.describe_regions()
output = ['us-east-1']
for r in response['Regions']:
        # return us-east-1 first, but don't return it twice
if r['RegionName'] == "us-east-1":
continue
output.append(r['RegionName'])
return(output)
def do_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="print debugging info", action='store_true')
parser.add_argument("--error", help="print error info only", action='store_true')
parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true')
parser.add_argument("--region", help="Only Process Specified Region")
parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)")
parser.add_argument("--outfile", help="Save the list of Instances to this file", default="instances-to-terminate.csv")
parser.add_argument("--older-than-days", help="Only Snapshot and Terminate Instances that have been stopped more than X days", default=90)
# parser.add_argument("--batch-size", help="Process no more than N stopped instances per region", default=10)
args = parser.parse_args()
return(args)
if __name__ == '__main__':
args = do_args()
# Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
# create console handler and set level to debug
logger = logging.getLogger(sys.argv[0])
ch = logging.StreamHandler()
if args.debug:
logger.setLevel(logging.DEBUG)
elif args.error:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.INFO)
# Silence Boto3 & Friends
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
# create formatter
if args.timestamp:
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
else:
formatter = logging.Formatter('%(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
try:
main(args, logger)
except KeyboardInterrupt:
exit(1)
except ClientError as e:
if e.response['Error']['Code'] == "RequestExpired":
print("Credentials expired")
exit(1)
else:
raise
|
import utils
from collections import defaultdict
def read_input(filename):
text = utils.read(filename, 'string').split(',')
return [int(n) for n in text]
class Part1(utils.Part):
def __init__(self):
super().__init__(0)
def run(self, input, is_test):
n = 2020
while len(input) < n:
input.append(Part1.turn(input))
return input[-1]
@staticmethod
def turn(input):
num = input[-1]
if input.count(num) == 1:
return 0
turns = [i for i, n in enumerate(input) if n == num]
return turns[-1] - turns[-2]
class Part2(utils.Part):
def __init__(self):
super().__init__(175594)
def run(self, input, is_test):
turns_per_num = defaultdict(list)
for turn_count, num in enumerate(input):
turns_per_num[num].append(turn_count)
num = input[-1]
for turn_count in range(len(input), 30000000):
turns = turns_per_num[num]
num = turns[-1] - turns[-2] if len(turns) > 1 else 0
turns_per_num[num].append(turn_count)
return num
|
from flask import Flask, render_template, request
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
app = Flask(__name__)
english_bot = ChatBot("English Bot",silence_performance_warning=True)
english_bot.set_trainer(ChatterBotCorpusTrainer)
english_bot.train("chatterbot.corpus.english")
@app.route("/")
def home():
return render_template("index.html")
@app.route("/process_chat")
def get_raw_response():
try:
chat_val = str(request.args.get('chat_txt'))
return str(english_bot.get_response(chat_val))
    except Exception as e:
        print("error..", e)
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=9019)
# app.run()
|
class Constants(object):
"""Constants holder class that stores the bulk of the fixed strings used in the library."""
IG_SIG_KEY = '99e16edcca71d7c1f3fd74d447f6281bd5253a623000a55ed0b60014467a53b1'
IG_CAPABILITIES = '3brTBw==' # = base64.b64encode(struct.pack('<i', 131316445)).decode('ascii')
SIG_KEY_VERSION = '4'
APP_VERSION = '26.0.0.10.86'
APPLICATION_ID = '567067343352427'
FB_HTTP_ENGINE = 'Liger'
ANDROID_VERSION = 24
ANDROID_RELEASE = '7.0'
PHONE_MANUFACTURER = 'samsung'
PHONE_DEVICE = 'SM-G930F'
PHONE_MODEL = 'herolte'
PHONE_DPI = '640dpi'
PHONE_RESOLUTION = '1440x2560'
PHONE_CHIPSET = 'samsungexynos8890'
USER_AGENT_FORMAT = \
'Instagram %(app_version)s Android (%(android_version)d/%(android_release)s; ' \
'%(dpi)s; %(resolution)s; %(brand)s; %(device)s; %(model)s; %(chipset)s; en_US)'
USER_AGENT_EXPRESSION = \
r'Instagram\s(?P<app_version>[^\s]+)\sAndroid\s\((?P<android_version>[0-9]+)/(?P<android_release>[0-9\.]+);\s' \
r'(?P<dpi>\d+dpi);\s(?P<resolution>\d+x\d+);\s(?P<manufacturer>[^;]+);\s(?P<device>[^;]+);\s' \
r'(?P<model>[^;]+);\s(?P<chipset>[^;]+);'
USER_AGENT = USER_AGENT_FORMAT % {
'app_version': APP_VERSION,
'android_version': ANDROID_VERSION,
'android_release': ANDROID_RELEASE,
'brand': PHONE_MANUFACTURER,
'device': PHONE_DEVICE,
'model': PHONE_MODEL,
'dpi': PHONE_DPI,
'resolution': PHONE_RESOLUTION,
'chipset': PHONE_CHIPSET}
LOGIN_EXPERIMENTS = 'ig_android_sms_consent_in_reg,ig_android_flexible_sampling_universe,ig_android_background_conf_resend_fix,ig_restore_focus_on_reg_textbox_universe,ig_android_analytics_data_loss,ig_android_gmail_oauth_in_reg,ig_android_phoneid_sync_interval,ig_android_stay_at_one_tap_on_error,ig_android_link_to_access_if_email_taken_in_reg,ig_android_non_fb_sso,ig_android_family_apps_user_values_provider_universe,ig_android_reg_inline_errors,ig_android_run_fb_reauth_on_background,ig_fbns_push,ig_android_reg_omnibox,ig_android_show_password_in_reg_universe,ig_android_background_phone_confirmation_v2,ig_fbns_blocked,ig_android_access_redesign,ig_android_please_create_username_universe,ig_android_gmail_oauth_in_access,ig_android_reg_whiteout_redesign_v3' # noqa
EXPERIMENTS = 'ig_android_disk_cache_match_journal_size_to_cache_max_count,ig_android_ad_move_carousel_indicator_to_ufi_universe,ig_android_universe_video_production,ig_android_live_follow_from_comments_universe,ig_android_ad_watchandinstall_universe,ig_android_live_analytics,ig_android_video_captions_universe,ig_android_offline_location_feed,ig_android_ontact_invite_universe,ig_android_insta_video_reconnect_viewers,ig_android_live_broadcast_blacklist,ig_android_checkbox_instead_of_button_as_follow_affordance_universe,ig_android_ufi_redesign_video_social_context,ig_android_stories_surface_universe,ig_android_verified_comments_universe,ig_android_preload_media_ahead_in_current_reel,android_instagram_prefetch_suggestions_universe,ig_android_direct_inbox_tray_suggested_user_universe,ig_android_direct_blue_tab,ig_android_light_status_bar_universe,ig_android_asset_button_new_content_animation,ig_android_async_network_tweak_universe,ig_android_react_native_lazy_modules_killswitch,ig_android_instavideo_remove_nux_comments,ig_video_copyright_whitelist,ig_android_ad_sponsor_label_story_top_design_universe,ig_android_business_action,ig_android_direct_link_style,ig_android_live_heart_enhancements_universe,ig_android_preload_item_count_in_reel_viewer_buffer,ig_android_auto_retry_post_mode,ig_android_fix_render_thread_crash,ig_android_shopping,ig_fbns_preload_default,ig_android_gesture_dismiss_reel_viewer,ig_android_ad_logger_funnel_logging_universe,ig_android_direct_links,ig_android_links_receivers,ig_android_ad_impression_backtest,ig_android_offline_freshness_toast_10_12,ig_android_invites_without_token_universe,ig_android_immersive_viewer,ig_android_mqtt_skywalker,ig_fbns_push,ig_android_react_native_universe,ig_android_special_brush,ig_android_live_consumption_abr,ig_android_story_viewer_social_context,ig_android_explore_verified_badges_stories_universe,ig_android_video_loopcount_int,ig_android_enable_main_feed_reel_tray_preloading,ig_android_ad_watchbrowse_universe,ig_android_react_native_ota,ig_android_discover_people_icon_in_others_profile,ig_android_log_mediacodec_info,ig_android_enable_back_navigation_nux_universe,ig_android_cold_start_feed_request,ig_video_use_sve_universe,ig_android_offline_explore_10_14,ig_android_stories_teach_gallery_location,ig_android_http_stack_experiment_2017,ig_android_stories_device_tilt,ig_android_pending_request_search_bar,ig_android_fb_topsearch_sgp_fork_request,ig_android_animation_perf_reporter_timeout,ig_android_new_block_flow,ig_android_direct_address_links,ig_android_share_profile_photo_to_feed_universe,ig_android_stories_private_likes,ig_android_text_background,ig_android_stories_video_prefetch_kb,ig_android_su_activity_feed,ig_android_live_stop_broadcast_on_404,ig_android_render_iframe_interval,ig_android_boomerang_entry,ig_android_camera_shortcut_universe,ig_android_fetch_fresh_viewer_list,ig_android_ad_media_url_logging_universe,ig_android_phone_confirm_rate_limit_language_universe,ig_android_keep_http_cache_on_user_switch,ig_android_facebook_twitter_profile_photos,ig_android_full_user_detail_endpoint,ig_android_direct_sqlite_universe,ig_android_family_bridge_share,ig_android_search,ig_android_insta_video_consumption_titles,ig_android_live_notification_control,ig_android_camera_universe,ig_android_instavideo_audio_only_mode,ig_android_live_video_reactions_consumption_universe,ig_android_swipe_fragment_container,ig_creation_growth_holdout,ig_android_live_save_to_camera_roll_universe,ig_android_ad_cta_redesign_universe,ig_android_sticker_region_tracking,ig_
android_unified_inbox,ig_android_offline_main_feed_10_11,ig_android_chaining_teaser_animation,ig_android_business_conversion_value_prop_v2,ig_android_redirect_to_low_latency_universe,ig_android_feed_header_profile_ring_universe,ig_family_bridges_holdout_universe,ig_android_following_follower_social_context,ig_android_video_keep_screen_on,ig_android_profile_photo_as_media,ig_android_insta_video_consumption_infra,ig_android_sms_consent_in_edit_profile,ig_android_infinite_scrolling_launch,ig_in_feed_commenting,ig_android_live_broadcast_enable_year_class_2011,ig_android_direct_phone_number_links,ig_android_direct_share_sheet_ring,ig_android_stories_weblink_creation,ig_android_histogram_reporter,ig_android_network_cancellation,ig_android_react_native_insights,ig_android_insta_video_audio_encoder,ig_android_family_bridge_bookmarks,ig_android_dash_for_vod_universe,ig_android_direct_mutually_exclusive_experiment_universe,ig_android_stories_selfie_sticker,ig_android_ad_add_per_event_counter_to_logging_event,ig_android_rtl,ig_android_direct_send_auto_retry,ig_android_direct_video_autoplay_scroll,ig_android_promote_from_profile_button,ig_android_share_spinner,ig_android_profile_share_username,ig_android_sidecar_edit_screen_universe,ig_promotions_unit_in_insights_landing_page,ig_android_save_longpress_tooltip,ig_android_constrain_image_size_universe,ig_android_business_new_graphql_endpoint_universe,ig_ranking_following,ig_android_universe_reel_video_production,ig_android_sfplt,ig_android_offline_hashtag_feed,ig_android_live_skin_smooth,ig_android_stories_posting_offline_ui,ig_android_direct_add_local_thread_in_inbox,ig_android_swipe_navigation_x_angle_universe,ig_android_offline_mode_holdout,ig_android_non_square_first,ig_android_insta_video_drawing,ig_android_react_native_usertag,ig_android_swipeablefilters_universe,ig_android_analytics_logger_running_background_universe,ig_android_save_all,ig_android_reel_viewer_data_buffer_size,ig_android_disk_cache_has_sanity_check,ig_direct_quality_holdout_universe,ig_android_family_bridge_discover,ig_android_react_native_restart_after_error_universe,ig_story_tray_peek_content_universe,ig_android_profile,ig_android_high_res_upload_2,ig_android_http_service_same_thread,ig_android_remove_followers_universe,ig_android_skip_video_render,ig_android_live_viewer_comment_prompt_universe,ig_android_search_client_matching,ig_explore_netego,ig_android_boomerang_feed_attribution,ig_android_explore_story_sfslt_universe,ig_android_rendering_controls,ig_android_os_version_blocking,ig_android_encoder_width_safe_multiple_16,ig_android_direct_video_autoplay,ig_android_snippets_profile_nux,ig_android_e2e_optimization_universe,ig_android_disk_usage,ig_android_save_collections,ig_android_live_see_fewer_videos_like_this_universe,ig_android_live_view_profile_from_comments_universe,ig_formats_and_feedbacks_holdout_universe,ig_fbns_blocked,ig_android_instavideo_periodic_notif,ig_android_empty_feed_redesign,ig_android_marauder_update_frequency,ig_android_suggest_password_reset_on_oneclick_login,ig_android_live_special_codec_size_list,ig_android_enable_share_to_messenger,ig_android_live_video_reactions_creation_universe,ig_android_live_hide_viewer_nux,ig_android_channels_home,ig_android_sidecar_gallery_universe,ig_android_live_using_webrtc,ig_android_insta_video_broadcaster_infra_perf,ig_android_business_conversion_social_context,android_ig_fbns_kill_switch,ig_android_retry_story_seen_state,ig_android_react_native_universe_kill_switch,ig_android_stories_book_universe,ig_android_all_videopla
yback_persisting_sound,ig_android_cache_layer_bytes_threshold,ig_android_comment_deep_linking_v1,ig_android_business_promotion,ig_android_anrwatchdog,ig_android_qp_kill_switch,ig_android_ad_always_send_ad_attribution_id_universe,ig_android_2fac,ig_direct_bypass_group_size_limit_universe,ig_android_promote_simplified_flow,ig_android_share_to_whatsapp,ig_fbns_dump_ids,ig_android_ad_show_mai_cta_loading_state_universe,ig_android_skywalker_live_event_start_end,ig_android_toplive_verified_badges_universe,ig_android_live_join_comment_ui_change,ig_android_draw_button_new_tool_animation,ig_video_max_duration_qe_preuniverse,ig_android_http_stack_kz_debug,ig_request_cache_layer,ig_android_carousel_feed_indicators_universe,ig_android_new_optic,ig_android_mark_reel_seen_on_Swipe_forward,ig_fbns_shared,ig_android_capture_slowmo_mode,ig_android_save_multi_select,ig_android_mead,ig_android_video_single_surface,ig_android_offline_reel_feed,ig_android_video_download_logging,ig_android_last_edits,ig_android_exoplayer_4142,ig_android_snippets_haptic_feedback,ig_android_gl_drawing_marks_after_undo_backing,ig_android_mark_seen_state_on_viewed_impression,ig_android_live_backgrounded_reminder_universe,ig_android_disable_comment_public_test,ig_android_user_detail_endpoint,ig_android_comment_tweaks_universe,ig_android_add_to_last_post,ig_save_insights,ig_android_live_enhanced_end_screen_universe,ig_android_ad_add_counter_to_logging_event,ig_android_sidecar,ig_android_direct_split_new_message_button,ig_android_grid_video_icon,ig_android_ad_watchandlead_universe,ig_android_progressive_jpeg,ig_android_offline_story_stickers,ig_android_direct_inbox_unseen_hint,ig_android_top_live_titles_universe,ig_android_video_prefetch_for_connectivity_type,ig_android_ad_holdout_16m5_universe,ig_android_sync_on_background_enhanced,ig_android_upload_reliability_use_fbupload_lib,ig_android_samsung_app_badging,ig_android_offline_commenting,ig_android_insta_video_abr_resize,ig_android_insta_video_sound_always_on,ig_android_disable_comment' # noqa
|
# https://www.geeksforgeeks.org/how-to-sort-an-array-in-a-single-loop/
def sort_array(a):
length=len(a)
j = 0
while j < length-1:
if (a[j] > a[j+1]):
temp = a[j]
a[j] = a[j+1]
a[j+1] = temp
# updating the value of j = -1
# so after getting updated for j++
# in the loop it becomes 0 and
# the loop begins from the start.
j= -1
j+=1
return a
if __name__ == "__main__":
# Declaring an integer array of size 11.
arr = [1, 2, 99, 9, 8,
7, 6, 0, 5, 4, 3]
# Printing the original Array.
print("Original array: ", arr)
# Sorting the array using a single loop
arr = sort_array(arr)
# Printing the sorted array.
print("Sorted array: ", arr) |
'''(2,5)
'''
def foo():
bar = 1
|
from django.contrib import admin
from .models import Product
# Register your models here.
class ProductAdmin(admin.ModelAdmin):
list_display = ['name', 'price','quantity']
list_filter = ['name',]
search_fields = ['name',]
prepopulated_fields = {'slug': ('name',)}
list_editable = ['price', 'quantity']
admin.site.register(Product, ProductAdmin) |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import csv
import os
import random
import numpy as np
from digits.utils import subclass, override, constants
from digits.extensions.data.interface import DataIngestionInterface
from .forms import DatasetForm, InferenceForm
DATASET_TEMPLATE = "templates/dataset_template.html"
INFERENCE_TEMPLATE = "templates/inference_template.html"
@subclass
class DataIngestion(DataIngestionInterface):
"""
A data ingestion extension for text classification
"""
def __init__(self, is_inference_db=False, **kwargs):
super(DataIngestion, self).__init__(**kwargs)
self.userdata['is_inference_db'] = is_inference_db
# create character dict
self.userdata['cdict'] = {}
for i, c in enumerate(self.alphabet):
self.userdata['cdict'][c] = i + 1 # indices start at 1
# assign unknown characters to the same next available ID
self.userdata['unknown_char_id'] = len(self.alphabet) + 1
if self.class_labels_file:
with open(self.class_labels_file) as f:
self.userdata['class_labels'] = f.read().splitlines()
@override
def encode_entry(self, entry):
label = np.array([int(entry['class'])])
# convert characters to numbers
sample = []
count = 0
max_chars = self.max_chars_per_sample
for field in entry['fields']:
for char in field.lower():
if max_chars and count < self.max_chars_per_sample:
if char in self.userdata['cdict']:
num = self.userdata['cdict'][char]
else:
num = self.userdata['unknown_char_id']
sample.append(num)
count += 1
else:
break
# convert to numpy array
sample = np.array(sample, dtype='uint8')
# pad if necessary
if max_chars and count < max_chars:
sample = np.append(sample, np.full(
(max_chars - count),
fill_value=self.userdata['unknown_char_id'],
dtype='uint8'))
# make it a 3-D array
sample = sample[np.newaxis, np.newaxis, :]
return sample, label
@staticmethod
@override
def get_category():
return "Text"
@staticmethod
@override
def get_id():
return "text-classification"
@staticmethod
@override
def get_dataset_form():
return DatasetForm()
@staticmethod
@override
def get_dataset_template(form):
"""
parameters:
- form: form returned by get_dataset_form(). This may be populated
with values if the job was cloned
return:
- (template, context) tuple
- template is a Jinja template to use for rendering dataset creation
options
- context is a dictionary of context variables to use for rendering
the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, DATASET_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
@override
def get_inference_form():
return InferenceForm()
@staticmethod
@override
def get_inference_template(form):
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
@override
def get_title():
return "Classification"
@override
def itemize_entries(self, stage):
if not self.userdata['is_inference_db']:
if stage == constants.TRAIN_DB:
entries = self.read_csv(self.train_data_file)
elif stage == constants.VAL_DB:
if self.val_data_file:
entries = self.read_csv(self.val_data_file)
else:
entries = []
else:
entries = []
else:
if stage == constants.TEST_DB:
if not (bool(self.test_data_file) ^ bool(self.snippet)):
raise ValueError("You must provide either a data file or a snippet")
if self.test_data_file:
entries = self.read_csv(self.test_data_file, False)
elif self.snippet:
entries = [{'class': '0', 'fields': [self.snippet]}]
else:
entries = []
return entries
    def read_csv(self, filename, shuffle=True):
        entries = []
        with open(filename) as f:
            reader = csv.DictReader(f, fieldnames=['class'], restkey='fields')
            for row in reader:
                entries.append(row)
        if shuffle:
            random.shuffle(entries)
        return entries
|
import math
import numpy as np
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
import utils
import utils_tc
def ncc_global_np(source: np.ndarray, target: np.ndarray, **params):
if source.shape != target.shape:
raise ValueError("Resolution of both the images must be the same.")
source_mean, target_mean = np.mean(source), np.mean(target)
source_std, target_std = np.std(source), np.std(target)
ncc = np.mean((source - source_mean) * (target - target_mean) / (source_std * target_std))
if ncc != ncc:
ncc = -1
return -ncc
def ncc_global_tc(sources: tc.Tensor, targets: tc.Tensor, device: str="cpu", **params):
sources = (sources - tc.min(sources)) / (tc.max(sources) - tc.min(sources))
targets = (targets - tc.min(targets)) / (tc.max(targets) - tc.min(targets))
if sources.size() != targets.size():
raise ValueError("Shape of both the tensors must be the same.")
size = sources.size()
prod_size = tc.prod(tc.Tensor(list(size[1:])))
sources_mean = tc.mean(sources, dim=list(range(1, len(size)))).view((sources.size(0),) + (len(size)-1)*(1,))
targets_mean = tc.mean(targets, dim=list(range(1, len(size)))).view((targets.size(0),) + (len(size)-1)*(1,))
sources_std = tc.std(sources, dim=list(range(1, len(size))), unbiased=False).view((sources.size(0),) + (len(size)-1)*(1,))
targets_std = tc.std(targets, dim=list(range(1, len(size))), unbiased=False).view((targets.size(0),) + (len(size)-1)*(1,))
ncc = (1 / prod_size) * tc.sum((sources - sources_mean) * (targets - targets_mean) / (sources_std * targets_std), dim=list(range(1, len(size))))
ncc = tc.mean(ncc)
if ncc != ncc:
ncc = tc.autograd.Variable(tc.Tensor([-1]), requires_grad=True).to(device)
return -ncc
def ncc_local_tc(sources: tc.Tensor, targets: tc.Tensor, device: str="cpu", **params):
"""
Implementation inspired by VoxelMorph (with some modifications).
"""
sources = (sources - tc.min(sources)) / (tc.max(sources) - tc.min(sources))
targets = (targets - tc.min(targets)) / (tc.max(targets) - tc.min(targets))
ndim = len(sources.size()) - 2
if ndim not in [2, 3]:
raise ValueError("Unsupported number of dimensions.")
    win_size = params.get('win_size', 9)
window = (win_size, ) * ndim
sum_filt = tc.ones([1, 1, *window]).to(device)
pad_no = math.floor(window[0] / 2)
stride = ndim * (1,)
padding = ndim * (pad_no,)
conv_fn = getattr(F, 'conv%dd' % ndim)
sources_denom = sources**2
targets_denom = targets**2
numerator = sources*targets
sources_sum = conv_fn(sources, sum_filt, stride=stride, padding=padding)
targets_sum = conv_fn(targets, sum_filt, stride=stride, padding=padding)
sources_denom_sum = conv_fn(sources_denom, sum_filt, stride=stride, padding=padding)
targets_denom_sum = conv_fn(targets_denom, sum_filt, stride=stride, padding=padding)
numerator_sum = conv_fn(numerator, sum_filt, stride=stride, padding=padding)
size = np.prod(window)
u_sources = sources_sum / size
u_targets = targets_sum / size
cross = numerator_sum - u_targets * sources_sum - u_sources * targets_sum + u_sources * u_targets * size
sources_var = sources_denom_sum - 2 * u_sources * sources_sum + u_sources * u_sources * size
targets_var = targets_denom_sum - 2 * u_targets * targets_sum + u_targets * u_targets * size
ncc = cross * cross / (sources_var * targets_var + 1e-5)
return -tc.mean(ncc)
def mse_np(source: np.ndarray, target: np.ndarray, **params):
if source.shape != target.shape:
raise ValueError("Resolution of both the images must be the same.")
return np.mean((source - target)**2)
def mse_tc(sources: tc.Tensor, targets: tc.Tensor, device: str="cpu", **params):
if sources.size() != targets.size():
raise ValueError("Shape of both the tensors must be the same.")
return tc.mean((sources - targets)**2)
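# A minimal, hypothetical usage sketch (assumed shapes, not part of the original
# module): the *_tc metrics expect batched tensors shaped (B, C, H, W) for 2-D
# inputs and return values meant for minimization (negated NCC, plain MSE), e.g.:
#   sources = tc.rand(2, 1, 64, 64)
#   targets = tc.rand(2, 1, 64, 64)
#   loss = ncc_global_tc(sources, targets, device="cpu") + mse_tc(sources, targets)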
def ngf_np(source: np.ndarray, target: np.ndarray, **params):
epsilon = params['epsilon']
    return_response = params.get('return_response', False)
    ndim = len(source.shape)
if ndim == 2:
sgx, sgy = np.gradient(source)
tgx, tgy = np.gradient(target)
ds = np.sqrt(sgx**2 + sgy**2 + epsilon**2)
dt = np.sqrt(tgx**2 + tgy**2 + epsilon**2)
nm = sgx*tgx + sgy*tgy
elif ndim == 3:
sgx, sgy, sgz = np.gradient(source)
tgx, tgy, tgz = np.gradient(target)
ds = np.sqrt(sgx**2 + sgy**2 + sgz**2 + epsilon**2)
dt = np.sqrt(tgx**2 + tgy**2 + tgz**2 + epsilon**2)
nm = sgx*tgx + sgy*tgy + sgz*tgz
else:
raise ValueError("Unsupported number of dimensions.")
response = 1 - (nm / (ds * dt))**2
ngf = np.mean(response)
    if return_response:
        return ngf, response
    else:
        return ngf
def ngf_tc(sources: tc.Tensor, targets: tc.Tensor, device: str="cpu", **params):
epsilon = params['epsilon']
    return_response = params.get('return_response', False)
ndim = len(sources.size()) - 2
if ndim == 2:
sgx, sgy = utils_tc.tensor_gradient(sources, device=device)
tgx, tgy = utils_tc.tensor_gradient(targets, device=device)
ds = tc.sqrt(sgx**2 + sgy**2 + epsilon**2)
dt = tc.sqrt(tgx**2 + tgy**2 + epsilon**2)
nm = sgx*tgx + sgy*tgy
elif ndim == 3:
sgx, sgy, sgz = utils_tc.tensor_gradient(sources, device=device)
tgx, tgy, tgz = utils_tc.tensor_gradient(targets, device=device)
ds = tc.sqrt(sgx**2 + sgy**2 + sgz**2 + epsilon**2)
dt = tc.sqrt(tgx**2 + tgy**2 + tgz**2 + epsilon**2)
nm = sgx*tgx + sgy*tgy + sgz*tgz
else:
raise ValueError("Unsupported number of dimensions.")
response = 1 - (nm / (ds * dt))**2
ngf = tc.mean(response)
if return_response:
return ngf, response
else:
return ngf
def mind_ssc_tc(sources: tc.Tensor, targets: tc.Tensor, device: str="cpu", **params):
"""
Implementation inspired by https://github.com/voxelmorph/voxelmorph/pull/145 (with some modifications).
"""
sources = (sources - tc.min(sources)) / (tc.max(sources) - tc.min(sources))
targets = (targets - tc.min(targets)) / (tc.max(targets) - tc.min(targets))
    radius = params.get('radius', 2)
    dilation = params.get('dilation', 2)
ndim = len(sources.size()) - 2
if ndim not in [2, 3]:
raise ValueError("Unsupported number of dimensions.")
if ndim == 2:
sources = sources.unsqueeze(3)
targets = targets.unsqueeze(3)
def pdist_squared(x):
xx = (x**2).sum(dim=1).unsqueeze(2)
yy = xx.permute(0, 2, 1)
dist = xx + yy - 2.0 * tc.bmm(x.permute(0, 2, 1), x)
dist[dist != dist] = 0
dist = tc.clamp(dist, 0.0, np.inf)
return dist
def mind_ssc(images, radius, dilation):
kernel_size = radius * 2 + 1
six_neighbourhood = tc.Tensor([[0,1,1],
[1,1,0],
[1,0,1],
[1,1,2],
[2,1,1],
[1,2,1]]).long()
dist = pdist_squared(six_neighbourhood.t().unsqueeze(0)).squeeze(0)
x, y = tc.meshgrid(tc.arange(6), tc.arange(6))
mask = ((x > y).view(-1) & (dist == 2).view(-1))
idx_shift1 = six_neighbourhood.unsqueeze(1).repeat(1,6,1).view(-1,3)[mask,:]
idx_shift2 = six_neighbourhood.unsqueeze(0).repeat(6,1,1).view(-1,3)[mask,:]
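        # One-hot 3x3x3 kernels that extract the two voxels of each of the 12
        # self-similarity shift pairs via convolution.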
        mshift1 = tc.zeros(12, 1, 3, 3, 3).to(device)
        mshift1.view(-1)[tc.arange(12) * 27 + idx_shift1[:,0] * 9 + idx_shift1[:, 1] * 3 + idx_shift1[:, 2]] = 1
        mshift2 = tc.zeros(12, 1, 3, 3, 3).to(device)
        mshift2.view(-1)[tc.arange(12) * 27 + idx_shift2[:,0] * 9 + idx_shift2[:, 1] * 3 + idx_shift2[:, 2]] = 1
rpad1 = nn.ReplicationPad3d(dilation)
rpad2 = nn.ReplicationPad3d(radius)
ssd = F.avg_pool3d(rpad2((F.conv3d(rpad1(images), mshift1, dilation=dilation) - F.conv3d(rpad1(images), mshift2, dilation=dilation)) ** 2), kernel_size, stride=1)
mind = ssd - tc.min(ssd, 1, keepdim=True)[0]
mind_var = tc.mean(mind, 1, keepdim=True)
mind_var = tc.clamp(mind_var, mind_var.mean().item()*0.001, mind_var.mean().item()*1000)
mind /= mind_var
mind = tc.exp(-mind)
mind = mind[:, tc.Tensor([6, 8, 1, 11, 2, 10, 0, 7, 9, 4, 5, 3]).long(), :, :, :]
return mind
return tc.mean((mind_ssc(sources, radius, dilation) - mind_ssc(targets, radius, dilation))**2) |
"""simple api server for gpt-j"""
from flask import Flask, request, jsonify
from transformers import GPTJForCausalLM, AutoTokenizer
import torch
model = GPTJForCausalLM.from_pretrained("../gpt-j-6B", torch_dtype=torch.float16)
model = model.to(torch.device("cuda"))
tokenizer = AutoTokenizer.from_pretrained("../gpt-j-6B")
app = Flask(__name__)
@app.route('/v1/generate', methods=['POST'])
def generate():
content = request.get_json()
input_ids = tokenizer(content['prompt'], return_tensors="pt").input_ids
token_len = input_ids.size(dim=1)
reply_length = content['reply_length'] if 'reply_length' in content else 50
max_length = content['max_length'] if 'max_length' in content else token_len + reply_length
input_ids = input_ids.to(torch.device("cuda"))
gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=max_length)
gen_text = tokenizer.batch_decode(gen_tokens)[0]
return jsonify({"generated_text": gen_text})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080)
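# Example request against the endpoint above (assumes the server is running
# locally on port 8080 as configured here):
#   curl -X POST http://localhost:8080/v1/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Once upon a time", "reply_length": 30}'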
|
import io
import pickle
from google.appengine.ext import ndb
class ImportFixingUnpickler(pickle.Unpickler):
"""
In the case where we're reading a CachedQueryResult written by
the py3 version of TBA, we'll need to fix the imports to be
compatible with this one.
"""
def find_class(self, module, name):
renamed_module = module
prefix = "backend.common."
if module.startswith(prefix):
renamed_module = module[len(prefix):]
return pickle.Unpickler.find_class(self, renamed_module, name)
class ImportFixingPickleProperty(ndb.BlobProperty):
def _to_base_type(self, value):
"""Convert a value to the "base" value type for this property.
Args:
value (Any): The value to be converted.
Returns:
bytes: The pickled ``value``.
"""
file_obj = io.BytesIO()
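        # Protocol 2 is used so the pickled blob stays readable from Python 2
        # as well as Python 3 (see ImportFixingUnpickler above).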
pickle.Pickler(file_obj, protocol=2).dump(value)
return file_obj.getvalue()
def _from_base_type(self, value):
"""Convert a value from the "base" value type for this property.
Args:
value (bytes): The value to be converted.
Returns:
Any: The unpickled ``value``.
"""
file_obj = io.BytesIO(value)
return ImportFixingUnpickler(file_obj).load()
class CachedQueryResult(ndb.Model):
"""
A CachedQueryResult stores the result of an NDB query
"""
# Only one of result or result_dict should ever be populated for one model
result = ImportFixingPickleProperty(compressed=True) # Raw models
result_dict = ndb.JsonProperty() # Dict version of models
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
|
import click
from completions import get_local_archives
from constants import colors
import context
help_text = """(a) Lists the project's archives."""
@click.command(name='list', help=help_text)
@click.argument('pattern', default='*', type=click.STRING,
autocompletion=get_local_archives)
@click.pass_context
def list_archives(ctx, pattern):
project_name = ctx.obj['config'].get('name')
project_id = ctx.obj['config'].get('id')
click.secho('[{}]'.format(project_name), fg=colors.success, bold=True)
click.echo()
click.echo('Local archives:')
local_archives = ctx.obj['archives'].get_local_archives(pattern, include_details=True)
if len(local_archives) < 1:
click.secho(' [No archives found]', fg=colors.highlight)
else:
table = list(map(lambda d: [d['name'], d['date'], d['size']], local_archives))
ctx.obj['simple_table'].print(
table,
border_styles={
'fg': colors.borders
},
column_settings=[
{},
{
'align': 'center',
},
{
'align': 'right',
},
],
headers=[
'Name',
'Date',
'Size',
],
width='full',
show_horizontal_lines=False,
)
click.echo()
click.echo('Remote archives:')
try:
remote_archives = ctx.obj['ssh'].get_archives(project_id, include_details=True)
if len(remote_archives) < 1:
click.secho(' [No remote archives found]', fg=colors.highlight)
else:
table = list(map(lambda d: [d['name'], d['date'], d['size']], remote_archives))
ctx.obj['simple_table'].print(
table,
border_styles={
'fg': colors.borders
},
column_settings=[
{},
{
'align': 'center',
},
{
'align': 'right',
},
],
headers=[
'Name',
'Date',
'Size',
],
width='full',
show_horizontal_lines=False,
)
    except Exception:
        click.secho('  [Unable to fetch remote archives]', fg=colors.highlight)
@click.command(name='a', help=help_text, hidden=True)
@click.argument('pattern', default='*', type=click.STRING,
autocompletion=get_local_archives)
@click.pass_context
def list_archives_alias(ctx, pattern):
context.init(ctx)
context.init_project(ctx)
ctx.invoke(list_archives, pattern=pattern)
|
# tool to parse out the package names from top-pypi-packages
import json
from pprint import pprint
with open('top-pypi-packages-365-days.json') as f, open('pkgnames.txt', 'w') as outfile:
    data = json.load(f)
    rows = data['rows']
    for info in rows:
        print(info["project"])
        outfile.write(info["project"] + '\n')
|
""" Module for containing a base http error and handler for returning exceptions from views. """
from uuid import uuid4
from flask import make_response
from utils import get_logger
LOGGER = get_logger(__name__)
class HttpError(Exception):
""" Base exception for returning error responses via exception. """
response_code = 500
def __init__(self, message, response_code=None):
super().__init__(message)
self.message = message
self.id = uuid4()
if response_code is not None:
self.response_code = response_code
@property
def type(self):
""" Get type of this error as a string. """
return self.__class__.__qualname__
@property
def response(self):
""" Convert the exception into an api response. """
return make_response(
{"reason": self.message, "error_id": self.id}, self.response_code
)
class SocketIOEventError(Exception):
""" Errors that should be handled by emitting an event to current socketio connection. """
event = "error"
def __init__(self, message, event=None):
super().__init__(message)
self.message = message
self.id = uuid4()
if event is not None:
self.event = event
@property
def type(self):
""" Get type of this error as a string. """
return self.__class__.__qualname__
@property
def response(self):
""" Convert exception into data for a response. """
return {"reason": self.message, "error_id": str(self.id)}
def __str__(self) -> str:
return self.message
def __repr__(self) -> str:
return f"{self.type}(message={self.message}, event={self.event})<id={self.id}>"
|
class Solution:
def fizzBuzz(self, n: int):
res = []
for i in range(1,n+1):
if i % 3 == 0 and i % 5 == 0:
res.append('FizzBuzz')
elif i % 3 == 0:
res.append('Fizz')
elif i % 5 == 0:
res.append('Buzz')
else:
res.append(str(i))
return res
if __name__ == '__main__':
solution = Solution()
print(solution.fizzBuzz(11)) |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This encapsulate the main Findit APIs for external requests."""
import logging
from common.constants import DEFAULT_SERVICE_ACCOUNT
from findit_v2.model.luci_build import LuciFailedBuild
from findit_v2.services import build_util
from findit_v2.services import projects
from findit_v2.services.analysis.compile_failure import compile_analysis
from findit_v2.services.analysis.test_failure import test_analysis
from findit_v2.services.detection import api as detection_api
from findit_v2.services.failure_type import StepTypeEnum
def OnSupportedBuildCompletion(project, bucket, builder_name, build_id,
build_result):
"""Processes a completed build from a builder Findit is supporting.
Args:
project (str): Luci project of the build.
build_id (int): Id of the build.
Returns:
False if it is unsupported or skipped; otherwise True.
"""
if build_result != 'FAILURE':
# Skip builds that didn't fail.
logging.debug('Build %s/%s/%s/%s is not a failure', project, bucket,
builder_name, build_id)
return False
build, context = build_util.GetBuildAndContextForAnalysis(project, build_id)
if not build:
return False
detection_api.OnBuildFailure(context, build)
return True
def OnRerunBuildCompletion(project, build_id):
"""Processes a completed rerun builds.
Args:
project (str): Luci project of the build.
build_id (int): Id of the build.
Returns:
False if it is unsupported or skipped; otherwise True.
"""
rerun_build, context = build_util.GetBuildAndContextForAnalysis(
project, build_id)
if not rerun_build:
return False
if rerun_build.created_by != 'user:{}'.format(DEFAULT_SERVICE_ACCOUNT):
logging.info('Build %d is not triggered by Findit.', rerun_build.id)
return False
return detection_api.OnRerunBuildCompletion(context, rerun_build)
def OnBuildCompletion(project, bucket, builder_name, build_id, build_result):
"""Processes the completed build.
Args:
project (str): Luci project of the build.
bucket (str): Luci bucket of the build.
builder_name (str): Luci builder name of the build.
build_id (int): Id of the build.
build_result (str): Status of the build. E.g. SUCCESS, FAILURE, etc.
Returns:
False if it is unsupported or skipped; otherwise True.
"""
# Skip builders that are not in the whitelist of a supported project/bucket.
builder_type = projects.GetBuilderType(project, bucket, builder_name)
if builder_type == projects.BuilderTypeEnum.SUPPORTED:
return OnSupportedBuildCompletion(project, bucket, builder_name, build_id,
build_result)
if builder_type == projects.BuilderTypeEnum.RERUN:
return OnRerunBuildCompletion(project, build_id)
logging.info('Unsupported build %s/%s/%s/%s.', project, bucket, builder_name,
build_id)
return False
def OnBuildFailureAnalysisResultRequested(request):
"""Returns the findings of an analysis for a failed build.
Since Findit v2 only supports compile failure on cros builds, this api will
  simply return an empty response for other failures. This is to prevent Findit
spending too many pixels to tell users many failures are not supported.
Args:
request(findit_result.BuildFailureAnalysisRequest): request for a build
failure.
Returns:
(findit_result.BuildFailureAnalysisResponseCollection): Analysis results
for the requested build.
"""
build_id = request.build_id
build_alternative_id = request.build_alternative_id
if build_id:
build_entity = LuciFailedBuild.get_by_id(build_id)
if not build_entity:
logging.debug('No LuciFailedBuild entity for build %d.', request.build_id)
return []
else:
build_entity = LuciFailedBuild.GetBuildByNumber(
build_alternative_id.project, build_alternative_id.bucket,
build_alternative_id.builder, build_alternative_id.number)
if not build_entity:
logging.debug('No LuciFailedBuild entity for build %s/%s/%s/%d.',
build_alternative_id.project, build_alternative_id.bucket,
build_alternative_id.builder, build_alternative_id.number)
return []
if build_entity.build_failure_type == StepTypeEnum.COMPILE:
return compile_analysis.OnCompileFailureAnalysisResultRequested(
request, build_entity)
if build_entity.build_failure_type == StepTypeEnum.TEST:
return test_analysis.OnTestFailureAnalysisResultRequested(
request, build_entity)
logging.debug(
'Findit v2 only supports compile or test failure analysis, '
'so no results for %d with %s failures.', build_id,
build_entity.build_failure_type)
return []
|
from importlib import import_module
import humps
from sqlalchemy import MetaData, BigInteger, Column
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import sessionmaker, synonym
from ang.config import SETTINGS_MODULE
class Base:
@declared_attr
def __tablename__(self) -> str:
assert '.' in self.__module__, f'Unexpected module name for {self}: "{self.__module__}"'
app = self.__module__.split('.')[0]
name = humps.decamelize(self.__name__)
if not name.endswith('s'):
name += 's'
return f'{app}_{name}'
__mapper_args__ = {"eager_defaults": True}
@declared_attr
def id(cls):
""" Return first primary key column, or create a new one. """
for attr in dir(cls):
if attr == 'id' or attr.startswith('__'):
continue
val = getattr(cls, attr)
if isinstance(val, Column) and val.primary_key:
return synonym(attr)
return Column(BigInteger, primary_key=True, index=True)
def __repr__(self):
return f'<{str(self.__class__)} #{self.id}>'
convention = {
'all_column_names': lambda constraint, table: '_'.join([
column.name for column in constraint.columns.values()
]),
'ix': 'ix__%(table_name)s__%(all_column_names)s',
'uq': 'uq__%(table_name)s__%(all_column_names)s',
'ck': 'ck__%(table_name)s__%(constraint_name)s',
'fk': 'fk__%(table_name)s__%(all_column_names)s__%(referred_table_name)s',
'pk': 'pk__%(table_name)s'
}
metadata = MetaData(naming_convention=convention)
Model = declarative_base(cls=Base, metadata=metadata)
settings = import_module(SETTINGS_MODULE)
engine = create_async_engine(settings.DATABASE_URL, echo=True)
async_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
# TODO: await engine.dispose() - https://www.starlette.io/events/
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle as pickle
import os
import unittest
from contextlib import closing
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
from eventlet import spawn, Timeout, listen
from swift.common import utils
from swift.container import updater as container_updater
from swift.container import server as container_server
from swift.common.db import ContainerBroker
from swift.common.ring import RingData
from swift.common.utils import normalize_timestamp
class TestContainerUpdater(unittest.TestCase):
def setUp(self):
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'startcap'
self.testdir = os.path.join(mkdtemp(), 'tmp_test_container_updater')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
ring_file = os.path.join(self.testdir, 'account.ring.gz')
with closing(GzipFile(ring_file, 'wb')) as f:
pickle.dump(RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'ip': '127.0.0.1', 'port': 12345, 'device': 'sda1',
'zone': 0},
{'id': 1, 'ip': '127.0.0.1', 'port': 12345, 'device': 'sda1',
'zone': 2}], 30),
f)
self.devices_dir = os.path.join(self.testdir, 'devices')
os.mkdir(self.devices_dir)
self.sda1 = os.path.join(self.devices_dir, 'sda1')
os.mkdir(self.sda1)
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
def test_creation(self):
cu = container_updater.ContainerUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '2',
'node_timeout': '5',
})
self.assert_(hasattr(cu, 'logger'))
self.assert_(cu.logger is not None)
self.assertEquals(cu.devices, self.devices_dir)
self.assertEquals(cu.interval, 1)
self.assertEquals(cu.concurrency, 2)
self.assertEquals(cu.node_timeout, 5)
self.assert_(cu.get_account_ring() is not None)
def test_run_once(self):
cu = container_updater.ContainerUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '15',
'account_suppression_time': 0
})
cu.run_once()
containers_dir = os.path.join(self.sda1, container_server.DATADIR)
os.mkdir(containers_dir)
cu.run_once()
self.assert_(os.path.exists(containers_dir))
subdir = os.path.join(containers_dir, 'subdir')
os.mkdir(subdir)
cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
container='c')
cb.initialize(normalize_timestamp(1))
cu.run_once()
info = cb.get_info()
self.assertEquals(info['object_count'], 0)
self.assertEquals(info['bytes_used'], 0)
self.assertEquals(info['reported_object_count'], 0)
self.assertEquals(info['reported_bytes_used'], 0)
cb.put_object('o', normalize_timestamp(2), 3, 'text/plain',
'68b329da9893e34099c7d8ad5cb9c940')
cu.run_once()
info = cb.get_info()
self.assertEquals(info['object_count'], 1)
self.assertEquals(info['bytes_used'], 3)
self.assertEquals(info['reported_object_count'], 0)
self.assertEquals(info['reported_bytes_used'], 0)
def accept(sock, addr, return_code):
try:
with Timeout(3):
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEquals(inc.readline(),
'PUT /sda1/0/a/c HTTP/1.1\r\n')
headers = {}
line = inc.readline()
while line and line != '\r\n':
headers[line.split(':')[0].lower()] = \
line.split(':')[1].strip()
line = inc.readline()
self.assert_('x-put-timestamp' in headers)
self.assert_('x-delete-timestamp' in headers)
self.assert_('x-object-count' in headers)
self.assert_('x-bytes-used' in headers)
except BaseException, err:
import traceback
traceback.print_exc()
return err
return None
bindsock = listen(('127.0.0.1', 0))
def spawn_accepts():
events = []
for _junk in xrange(2):
sock, addr = bindsock.accept()
events.append(spawn(accept, sock, addr, 201))
return events
spawned = spawn(spawn_accepts)
for dev in cu.get_account_ring().devs:
if dev is not None:
dev['port'] = bindsock.getsockname()[1]
cu.run_once()
for event in spawned.wait():
err = event.wait()
if err:
raise err
info = cb.get_info()
self.assertEquals(info['object_count'], 1)
self.assertEquals(info['bytes_used'], 3)
self.assertEquals(info['reported_object_count'], 1)
self.assertEquals(info['reported_bytes_used'], 3)
def test_unicode(self):
cu = container_updater.ContainerUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '15',
})
containers_dir = os.path.join(self.sda1, container_server.DATADIR)
os.mkdir(containers_dir)
subdir = os.path.join(containers_dir, 'subdir')
os.mkdir(subdir)
cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
container='\xce\xa9')
cb.initialize(normalize_timestamp(1))
cb.put_object('\xce\xa9', normalize_timestamp(2), 3, 'text/plain',
'68b329da9893e34099c7d8ad5cb9c940')
def accept(sock, addr):
try:
with Timeout(3):
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write('HTTP/1.1 201 OK\r\nContent-Length: 0\r\n\r\n')
out.flush()
inc.read()
except BaseException, err:
import traceback
traceback.print_exc()
return err
return None
bindsock = listen(('127.0.0.1', 0))
def spawn_accepts():
events = []
for _junk in xrange(2):
with Timeout(3):
sock, addr = bindsock.accept()
events.append(spawn(accept, sock, addr))
return events
spawned = spawn(spawn_accepts)
for dev in cu.get_account_ring().devs:
if dev is not None:
dev['port'] = bindsock.getsockname()[1]
cu.run_once()
for event in spawned.wait():
err = event.wait()
if err:
raise err
info = cb.get_info()
self.assertEquals(info['object_count'], 1)
self.assertEquals(info['bytes_used'], 3)
self.assertEquals(info['reported_object_count'], 1)
self.assertEquals(info['reported_bytes_used'], 3)
if __name__ == '__main__':
unittest.main()
|
"""
Testing parse_mol modules, consists of 4 functions:
parse_ligands(ligand_list: List[string]) -> List[OBMol]
parse_protein(protein: string) -> OBMol
Mutate input list
enumerate_ligand_files(
ligand_pose -> List[None],
ligand_files -> List[string]
) -> None
enumerate_ligand_file_list(
ligand_pose -> List[None],
ligand_file_list -> List[string]
) -> None
"""
from pyplif_hippos import parse_ligands, parse_protein, enumerate_ligand_files, enumerate_ligand_file_list
def test_parse_ligands(vina_ligands_mol2):
# Arrange
# Act
ligand_mol_list = parse_ligands(vina_ligands_mol2)
# Assert
assert len(ligand_mol_list) == 5
assert ligand_mol_list[0].NumAtoms() == 31
def test_parse_protein():
# Arrange
mol_path = "tests/data/direct_ifp/mol2_vina/"
protein_name = mol_path + "protein_vina.mol2"
# Act
protein_mol = parse_protein(protein_name)
arg116 = protein_mol.GetResidue(39)
# Assert
assert arg116.GetName() == "ARG116"
assert protein_mol.NumResidues() == 390
def test_enumerate_ligand_files(vina_ligands_mol2):
# Arrange
ligand_pose = ["tests/data/direct_ifp/mol2_vina/vina0.mol2"]
# Act
enumerate_ligand_files(ligand_pose, vina_ligands_mol2)
# Assert
assert len(ligand_pose) == 6
assert ligand_pose[5][-10:] == "vina5.mol2"
def test_enumerate_ligand_file_list():
# Arrange
ligand_pose = ["tests/data/direct_ifp/mol2_vina/vina0.mol2"]
ligand_file_list = [
"tests/data/direct_ifp/ligand_mol2_1.lst",
"tests/data/direct_ifp/ligand_mol2_2.lst",
]
# Act
enumerate_ligand_file_list(ligand_pose, ligand_file_list)
# Assert
assert len(ligand_pose) == 11
assert ligand_pose[10][-11:] == "vina10.mol2"
|
# Write a program that reads 5 numbers and reports their sum and average
soma = 0
for contador in range(5):
    numero = int(input("Enter a number: "))
    soma += numero
print(f"The sum of the entered numbers is: {soma}")
print(f"The average of the entered numbers is: {soma / 5}")
|
import time
import src.twitterscraper as ts
# pylint: disable=W0702, C0103
def test_search_term():
"""Check that search term function returns dataframe of tweets that contain term searched."""
twitterscraper = ts.Twitter()
search_parameters = ["CAPitol", "jury", "Cookie"]
for term in search_parameters:
try:
dataframe = twitterscraper.search_term(term)
direct_term_present = 0
no_direct_term_present = 0
direct_presense_ratio = 0
            for ind in dataframe.index:
                # Reset per tweet so a match in an earlier tweet does not carry over.
                hasTerm = False
                tweet = dataframe["full_text"][ind]
tweet_components = tweet.split(" ")
for element in tweet_components:
if term.lower() in element.lower():
hasTerm = True
if hasTerm is True:
direct_term_present += 1
else:
no_direct_term_present += 1
if no_direct_term_present == 0:
direct_presense_ratio = 1
else:
direct_presense_ratio = direct_term_present / no_direct_term_present
assert direct_presense_ratio > 0.9
except:
time.sleep(10)
def test_search_hastag():
"""Check that search term function returns dataframe of tweets containing hashtag with term"""
twitterscraper = ts.Twitter()
search_parameters = ["DACA", "Forthekids"]
for term in search_parameters:
try:
dataframe = twitterscraper.search_hashtag(term)
direct_term_present = 0
no_direct_term_present = 0
direct_presense_ratio = 0
            target_term = "#" + term
            for ind in dataframe.index:
                # Reset per tweet so a match in an earlier tweet does not carry over.
                hasTerm = False
                tweet = dataframe["full_text"][ind]
tweet_components = tweet.split(" ")
for element in tweet_components:
if target_term.lower() in element.lower():
hasTerm = True
if hasTerm is True:
direct_term_present += 1
else:
no_direct_term_present += 1
if no_direct_term_present == 0:
direct_presense_ratio = 1
else:
direct_presense_ratio = direct_term_present / no_direct_term_present
assert direct_presense_ratio > 0.9
except:
time.sleep(10)
def test_search_user():
"""Check that search user properly returns tweets of/about a user when
given that user's user ID"""
twitterscraper = ts.Twitter()
search_parameters = ["AOC", "tedcruz", "BernieSanders", "Aly_Raisman"]
for term in search_parameters:
try:
dataframe = twitterscraper.search_user(term)
direct_term_present = 0
no_direct_term_present = 0
direct_presense_ratio = 0
            for ind in dataframe.index:
                # Reset per tweet so a match in an earlier tweet does not carry over.
                hasTerm = False
                tweet = dataframe["full_text"][ind]
username = dataframe["screen_name"][ind]
if term.lower() == username.lower():
hasTerm = True
tweet_components = tweet.split(" ")
for element in tweet_components:
if term.lower() in element.lower():
hasTerm = True
if hasTerm is True:
direct_term_present += 1
else:
no_direct_term_present += 1
if no_direct_term_present == 0:
direct_presense_ratio = 1
else:
direct_presense_ratio = direct_term_present / no_direct_term_present
assert direct_presense_ratio > 0.9
except:
time.sleep(10)
|
import os
import pickle
import os.path
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
from operator import itemgetter
from skimage.draw import polygon
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
VOC_CLASSES = ( '__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
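# NOTE: the standard VOC class names above are immediately overridden below with
# numeric labels '1'-'20'; in this dataset the class index appears to encode an
# orientation bin rather than an object category (see bb_to_corners below).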
VOC_CLASSES = ( '__background__', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20')
# for making bounding boxes pretty
COLORS = ((255, 0, 0, 128), (0, 255, 0, 128), (0, 0, 255, 128),
(0, 255, 255, 128), (255, 0, 255, 128), (255, 255, 0, 128))
class AnnotationTransform(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
    Initialized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self, class_to_ind=None, keep_difficult=True):
self.class_to_ind = class_to_ind or dict(
zip(VOC_CLASSES, range(len(VOC_CLASSES))))
self.keep_difficult = keep_difficult
def __call__(self, target):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = np.empty((0,5))
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not self.keep_difficult and difficult:
continue
name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
#cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
bndbox.append(cur_pt)
label_idx = self.class_to_ind[name]
bndbox.append(label_idx)
res = np.vstack((res,bndbox)) # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
class JACQUARDDetection(data.Dataset):
"""VOC Detection Dataset Object
input is image, target is annotation
Arguments:
root (string): filepath to VOCdevkit folder.
image_set (string): imageset to use (eg. 'train', 'val', 'test')
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
dataset_name (string, optional): which dataset to load
(default: 'VOC2007')
"""
def __init__(self, root, image_sets, preproc=None, target_transform=AnnotationTransform(),
dataset_name='VOC0712'):
self.root = root
self.image_set = image_sets
self.preproc = preproc
self.target_transform = target_transform
self.name = dataset_name
self._annopath = os.path.join('%s', 'Annotations', '%s.xml')
self._imgpath = os.path.join('%s', 'JPEGImages', '%s.jpg')
self.ids = list()
for (year, name) in image_sets:
self._year = year
rootpath = os.path.join(self.root, 'VOC' + year)
for line in open(os.path.join(rootpath, 'ImageSets', 'Main', name + '.txt')):
self.ids.append((rootpath, line.strip()))
def __getitem__(self, index):
img_id = self.ids[index]
target = ET.parse(self._annopath % img_id).getroot()
img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
height, width, _ = img.shape
if self.target_transform is not None:
target = self.target_transform(target)
if self.preproc is not None:
img, target = self.preproc(img, target)
#print(img.size())
# target = self.target_transform(target, width, height)
#print(target.shape)
return img, target
def __len__(self):
return len(self.ids)
def pull_image(self, index):
'''Returns the original image object at index in PIL form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
PIL img
'''
img_id = self.ids[index]
return cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
def pull_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
Return:
list: [img_id, [(label, bbox coords),...]]
eg: ('001718', [('dog', (96, 13, 438, 332))])
'''
img_id = self.ids[index]
anno = ET.parse(self._annopath % img_id).getroot()
# gt = self.target_transform(anno, 1, 1)
# gt = self.target_transform(anno)
# return img_id[1], gt
if self.target_transform is not None:
anno = self.target_transform(anno)
return anno
def pull_img_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
Return:
list: [img_id, [(label, bbox coords),...]]
eg: ('001718', [('dog', (96, 13, 438, 332))])
'''
img_id = self.ids[index]
img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
anno = ET.parse(self._annopath % img_id).getroot()
gt = self.target_transform(anno)
height, width, _ = img.shape
boxes = gt[:,:-1]
labels = gt[:,-1]
boxes[:, 0::2] /= width
boxes[:, 1::2] /= height
labels = np.expand_dims(labels,1)
targets = np.hstack((boxes,labels))
return img, targets
def pull_tensor(self, index):
'''Returns the original image at an index in tensor form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
tensorized version of img, squeezed
'''
to_tensor = transforms.ToTensor()
return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
self._write_voc_results_file(all_boxes)
aps,map = self._do_python_eval(output_dir)
return aps,map
def _get_voc_results_file_template(self):
filename = 'comp4_det_test' + '_{:s}.txt'
filedir = os.path.join(
self.root, 'results', 'VOC' + self._year, 'Main')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(VOC_CLASSES):
cls_ind = cls_ind
if cls == '__background__':
continue
print('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.ids):
index = index[1]
dets = all_boxes[cls_ind][im_ind]
                    if len(dets) == 0:
continue
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
rootpath = os.path.join(self.root, 'VOC' + self._year)
name = self.image_set[0][1]
annopath = os.path.join(
rootpath,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
rootpath,
'ImageSets',
'Main',
name+'.txt')
cachedir = os.path.join(self.root, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if output_dir is not None and not os.path.isdir(output_dir):
os.mkdir(output_dir)
detDB = {}
for i, cls in enumerate(VOC_CLASSES):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
detfile = filename.format(cls)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
for j in range(len(image_ids)):
im_loc = image_ids[j]
conf_loc = confidence[j]
bb_loc = BB[j, :]
if im_loc not in detDB:
detDB[im_loc] = []
bb_entry = [conf_loc, int(cls), bb_loc[0], bb_loc[1], bb_loc[2], bb_loc[3]] #confidence, class, xmin, ymin, xmax, ymax
detDB[im_loc].append(bb_entry)
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
total = 0
suc = 0
for im in imagenames:#foreach image
if im not in detDB:
print("No detections for image", im)
continue
bbDB = sorted(detDB[im], key=itemgetter(0), reverse=True)
bestBB = bbDB[0]
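            # A grasp counts as successful if the top-confidence detection
            # overlaps some ground-truth box with IoU > 0.25 (orientation
            # agreement is enforced inside calc_max_iou).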
gtbbs = self.parse_rec(annopath.format(im))
max_iou = self.calc_max_iou(bestBB, gtbbs)
total += 1
if max_iou > 0.25:
suc += 1
if total % 100 == 0:
print(suc, total, suc/total)
acc = suc / total
print("FINAL ACCURACY", acc)
return acc, acc
def bb_to_corners(self, bb, angle_classes = 19):
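        # bb is [confidence, angle_class, xmin, ymin, xmax, ymax]; the angle
        # class is mapped to an orientation in [0, pi) and the axis-aligned box
        # is rotated around its centre.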
        x = (bb[4] + bb[2]) / 2.0
        y = (bb[5] + bb[3]) / 2.0
        width = bb[4] - bb[2]
        height = bb[5] - bb[3]
        angle = (bb[1] - 1) / angle_classes * np.pi
        corners = np.zeros((4, 2))
        corners[0, 0] = -width / 2
        corners[0, 1] = height / 2
        corners[1, 0] = width / 2
        corners[1, 1] = height / 2
        corners[2, 0] = width / 2
        corners[2, 1] = -height / 2
        corners[3, 0] = -width / 2
        corners[3, 1] = -height / 2
rot = [[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]]
corners = np.dot(corners, rot)
corners = corners + np.array([x, y])
return corners, angle
def calc_max_iou(self, bb, gtbbs, visualize=False):
max_iou = 0
corners1, angle1 = self.bb_to_corners(bb)
if visualize:
img = np.zeros((1024, 1024, 3), np.uint8)
self.cv2corners(img, corners1, color=(0, 255, 0))
for i in range(len(gtbbs)):
gtbb = gtbbs[i]
gtbb = [1, int(gtbb['name']), gtbb['bbox'][0], gtbb['bbox'][1], gtbb['bbox'][2], gtbb['bbox'][3]]
corners2, angle2 = self.bb_to_corners(gtbb)
if visualize:
self.cv2corners(img, corners2)
if abs(angle2 - angle1) > np.pi / 6:
continue
iou = self.calc_iou(corners1, corners2)
max_iou = max(iou, max_iou)
if visualize:
print(max_iou)
cv2.imshow('result', img)
cv2.waitKey(0)
return max_iou
def calc_iou(self, corners1, corners2):
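        # Rasterise both rotated rectangles onto a shared canvas and compute
        # IoU by counting doubly-covered vs. covered pixels.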
rr1, cc1 = polygon(corners1[:, 0], corners1[:, 1])
rr2, cc2 = polygon(corners2[:, 0], corners2[:, 1])
try:
r_max = max(rr1.max(), rr2.max()) + 1
c_max = max(cc1.max(), cc2.max()) + 1
        except ValueError:
return 0
canvas = np.zeros((r_max, c_max))
canvas[rr1, cc1] += 1
canvas[rr2, cc2] += 1
union = np.sum(canvas > 0)
if union == 0:
return 0
intersection = np.sum(canvas == 2)
return intersection * 1.0 / union
def cv2corners(self, img, corners, color=(255, 0, 0)):
for i in range(4):
nextI = (i + 1) % 4
c1 = (int(corners[i, 0]), int(corners[i, 1]))
c2 = (int(corners[nextI, 0]), int(corners[nextI, 1]))
cv2.line(img, c1, c2, color, 3)
def parse_rec(self, filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
# obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_struct)
return objects
def show(self, index):
img, target = self.__getitem__(index)
for obj in target:
obj = obj.astype(np.int)
cv2.rectangle(img, (obj[0], obj[1]), (obj[2], obj[3]), (255,0,0), 3)
cv2.imwrite('./image.jpg', img)
## test
# if __name__ == '__main__':
# ds = VOCDetection('../../../../../dataset/VOCdevkit/', [('2012', 'train')],
# None, AnnotationTransform())
# print(len(ds))
# img, target = ds[0]
# print(target)
# ds.show(1) |
#!/usr/bin/env python2
#
# This file is part of the dune-hdd project:
# https://github.com/pymor/dune-hdd
# Copyright Holders: Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import division, print_function
import numpy as np
from pymor.core.interfaces import ImmutableInterface
from pymor.reductors.basic import reduce_generic_rb
from pymor.playground.reductors import GenericBlockRBReconstructor
class DetailedEstimator(ImmutableInterface):
def __init__(self, example, wrapper, mu_hat, mu_bar):
self._example = example
self._wrapper = wrapper
self._mu_hat = mu_hat
self._mu_bar = mu_bar
def estimate(self, U_h, mu):
return self._example.estimate(U_h, 'eta_OS2014_*', self._mu_hat, self._mu_bar, mu)
class ReducedEstimator(object):
def __init__(self, discretization, example, wrapper, mu_hat, mu_bar, norm, compute_ids, return_id):
self._discretization = discretization
self._wrapper = wrapper
self._estimator = DetailedEstimator(example, wrapper, mu_hat, mu_bar)
self._norm = norm
self._compute_ids = compute_ids if isinstance(compute_ids, tuple) else (compute_ids,)
self._return_id = return_id
self.extension_step = -1
self.rc = None
self.data = {}
def add_to_data(self, key, mu, value):
        if self.extension_step not in self.data:
            self.data[self.extension_step] = {}
        if key not in self.data[self.extension_step]:
            self.data[self.extension_step][key] = []
        if key + '_mus' not in self.data[self.extension_step]:
            self.data[self.extension_step][key + '_mus'] = []
self.data[self.extension_step][key].append(value)
self.data[self.extension_step][key + '_mus'].append(mu)
def estimate(self, U, mu, discretization):
U_red = self.rc.reconstruct(U)
assert len(U_red) == 1
U_red_global = self._discretization.globalize_vectors(U_red)
U_red_dune = U_red_global._list[0]._impl
U_h = self._discretization.solve(mu)
U_h_global = self._discretization.globalize_vectors(U_h)
assert len(U_h_global) == 1
U_h_dune = U_h_global._list[0]._impl
# compute errors
example = self._estimator._example
mu_dune = self._wrapper.dune_parameter(mu)
mu_bar_dune = self._estimator._mu_bar
mu_hat_dune = self._estimator._mu_hat
if 'discretization_error' in self._compute_ids:
self.add_to_data('discretization_error', mu, example.compute_error(U_h_dune, 'elliptic', mu_dune, mu_bar_dune))
if 'full_error' in self._compute_ids:
self.add_to_data('full_error', mu, example.compute_error(U_red_dune, 'elliptic', mu_dune, mu_bar_dune))
if 'model_reduction_error' in self._compute_ids:
self.add_to_data('model_reduction_error', mu, self._norm(U_red - U_h)[0])
# compute estimates
alpha_mu_mu_bar = example.alpha(mu_dune, mu_bar_dune)
gamma_mu_mu_bar = example.gamma(mu_dune, mu_bar_dune)
alpha_mu_mu_hat = example.alpha(mu_dune, mu_hat_dune)
self.add_to_data('alpha_mu_mu_bar', mu, alpha_mu_mu_bar)
self.add_to_data('gamma_mu_mu_bar', mu, gamma_mu_mu_bar)
self.add_to_data('alpha_mu_mu_hat', mu, alpha_mu_mu_hat)
        if 'eta_red' in self._compute_ids or 'eta_nc_red' in self._compute_ids:
eta_nc_red = example.estimate(U_red_dune, 'eta_NC_OS2014', mu_hat_dune, mu_bar_dune, mu_dune)
self.add_to_data('eta_nc_red', mu, eta_nc_red)
        if 'eta_red' in self._compute_ids or 'eta_r_red' in self._compute_ids:
eta_r_red = example.estimate(U_red_dune, 'eta_R_OS2014_*', mu_hat_dune, mu_bar_dune, mu_dune)
self.add_to_data('eta_r_red', mu, eta_r_red)
        if 'eta_red' in self._compute_ids or 'eta_df_red' in self._compute_ids:
eta_df_red = example.estimate(U_red_dune, 'eta_DF_OS2014_*', mu_hat_dune, mu_bar_dune, mu_dune)
self.add_to_data('eta_df_red', mu, eta_df_red)
if 'eta_red' in self._compute_ids:
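            # Combine the nonconformity (NC), residual (R) and diffusive-flux
            # (DF) contributions, weighted by the parameter equivalence
            # constants alpha(mu, mu_bar), gamma(mu, mu_bar) and alpha(mu, mu_hat).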
eta_red = (1.0/np.sqrt(alpha_mu_mu_bar))*(np.sqrt(gamma_mu_mu_bar)*eta_nc_red
+ eta_r_red
+ (1.0/np.sqrt(alpha_mu_mu_hat))*eta_df_red)
self.add_to_data('eta_red', mu, eta_red)
assert self._return_id in self._compute_ids
return self.data[self.extension_step][self._return_id][-1]
def reduce_with_estimator(discretization,
RB,
operator_product=None,
vector_product=None,
disable_caching=True,
extends=None,
reduced_estimator=None):
assert operator_product is None
rd, _, reduction_data = reduce_generic_rb(discretization,
RB,
vector_product,
disable_caching,
extends)
rc = GenericBlockRBReconstructor(RB)
reduced_estimator.extension_step += 1
reduced_estimator.rc = rc
rd = rd.with_(estimator=reduced_estimator)
return rd, rc, reduction_data
|
import time
from gileum.test import MockGileum
setting = MockGileum(
test_name="unittest@gileum",
developer_name="jjj999",
current_time=time.time(),
glm_name="main",
)
|
import sys
import os
from numba.cuda.cudadrv.libs import test
from numba.cuda.cudadrv.nvvm import NVVM
def run_test():
if not test():
return False
nvvm = NVVM()
print("NVVM version", nvvm.get_version())
return nvvm.get_version() is not None
sys.exit(0 if run_test() else 1)
|
from django.contrib import admin
# Register your models here.
from yonghu.models import Yhb, Address
admin.site.register(Yhb)  # register the user table so it shows in the admin
admin.site.register(Address)
|
from toee import *
def OnBeginSpellCast(spell):
print "Wounding Whispers OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
#game.particles( "sp-divination-conjure", spell.caster )
def OnSpellEffect(spell):
print "Wounding Whispers OnSpellEffect"
spell.duration = 1 * spell.caster_level # 1 rnd/cl
spellTarget = spell.target_list[0]
spellTarget.obj.condition_add_with_args('sp-Wounding Whispers', spell.id, spell.duration)
spellTarget.partsys_id = game.particles('sp-True Strike', spellTarget.obj)
spell.spell_end(spell.id)
def OnBeginRound(spell):
print "Wounding Whispers OnBeginRound"
def OnEndSpellCast(spell):
print "Wounding Whispers OnEndSpellCast" |
#!python2.7
# -*- coding: utf-8 -*-
"""
Created by kun on 2016/7/21.
"""
from math import sqrt
__author__ = 'kun'
critics = {'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
'The Night Listener': 3.0},
'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 3.5},
'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
'The Night Listener': 4.5, 'Superman Returns': 4.0,
'You, Me and Dupree': 2.5},
'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 2.0},
'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane': 4.5, 'You, Me and Dupree': 1.0, 'Superman Returns': 4.0}}
def sim_pearson(prefs, p1, p2): # Get the list of mutually rated items
si = {}
for item in prefs[p1]:
if item in prefs[p2]:
si[item] = 1
    if len(si) == 0:  # if there are no ratings in common, return 0
return 0
n = len(si) # Sum calculations
sum1 = sum([prefs[p1][it] for it in si]) # Sums of all the preferences
sum2 = sum([prefs[p2][it] for it in si])
sum1Sq = sum([pow(prefs[p1][it], 2) for it in si]) # Sums of the squares
sum2Sq = sum([pow(prefs[p2][it], 2) for it in si])
pSum = sum([prefs[p1][it] * prefs[p2][it] for it in si]) # Sum of the products
num = pSum - (sum1 * sum2 / n) # Calculate r (Pearson score)
den = sqrt((sum1Sq - pow(sum1, 2) / n) * (sum2Sq - pow(sum2, 2) / n))
if den == 0:
return 0
r = num / den
return r
print(sim_pearson(critics, 'Lisa Rose', 'Gene Seymour'))
|
"""
@name: PyHouse/src/Modules.Core.Utilities.uuid_tools.py
@author: D. Brian Kimmel
@contact: [email protected]
@copyright: (c) 2015-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on Jun 22, 2015
@Summary:
"""
__updated__ = '2019-10-24'
# Import system type stuff
import os
import uuid
# Import PyMh files
# from Modules.Core.data_objects import UuidData
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.UuidTools ')
def _file_name(p_pyhouse_obj, p_file_name):
""" Find the name of the file we will be using.
"""
l_file = 'xxx' # os.path.join(p_pyhouse_obj._Config.ConfigDir, 'Uuid', p_file_name)
return l_file
def get_uuid_file(p_pyhouse_obj, p_file_name):
""" get the uuid for the file if it exists OR create the file with a persistent UUID if needed.
"""
l_file_name = _file_name(p_pyhouse_obj, p_file_name + '.uuid')
    try:
        l_file = open(l_file_name, mode='r')
        l_uuid = l_file.read()
        l_file.close()
    except IOError:
        l_uuid = Uuid.create_uuid()
        l_file = open(l_file_name, mode='w')
        l_file.write(l_uuid)
        l_file.close()
return l_uuid
class Uuid:
@staticmethod
def create_uuid():
""" Create a new Type 1 UUID.
"""
return str(uuid.uuid1())
@staticmethod
def make_valid(p_uuid):
"""
Preserve the UUID if it is present.
        If the UUID is not 36 characters long, return a newly generated UUID.
@param p_uuid: a string holding a UUID
"""
try:
if len(p_uuid) != 36:
p_uuid = Uuid.create_uuid()
LOG.error('Invalid UUID found (1) - Creating a new one.')
except TypeError:
p_uuid = Uuid.create_uuid()
LOG.error('Invalid UUID found (2) - Creating a new one.')
return p_uuid
@staticmethod
def add_uuid(p_pyhouse_obj, p_uuid_obj):
""" Add the given UuidData() object to PyHouse.
"""
l_uuid = p_uuid_obj.UUID
if l_uuid in p_pyhouse_obj._Uuids.All:
LOG.info('Duplicate UUIDs Detected. Old:{} New:{}'.format(
p_pyhouse_obj._Uuids.All[l_uuid].UuidType, p_uuid_obj.UuidType))
p_pyhouse_obj._Uuids.All[l_uuid] = p_uuid_obj
# ## END DBK
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-23 12:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trainer', '0008_solution_time_elapsed'),
]
operations = [
migrations.AddField(
model_name='user',
name='counter',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='user',
name='counter_correct',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='user',
name='counter_wrong',
field=models.IntegerField(default=0),
),
]
|
import datetime
from base64 import b64encode
from decimal import Decimal
import pytest
from local_data_api.models import Field, SqlParameter
def test_valid_field() -> None:
assert SqlParameter(name='abc', value=Field(stringValue='abc')).valid_value == 'abc'
assert SqlParameter(name='abc', value=Field(blobValue='abc')).valid_value == 'abc'
assert SqlParameter(name='abc', value=Field(doubleValue=0.1)).valid_value == 0.1
assert SqlParameter(name='abc', value=Field(isNull=True)).valid_value is None
assert SqlParameter(name='abc', value=Field(longValue=123)).valid_value == 123
assert SqlParameter(name='abc', value=Field()).valid_value is None
assert (
SqlParameter(
name='abc', value=Field(stringValue='123456789'), typeHint='DECIMAL'
).valid_value
== '123456789'
)
assert (
SqlParameter(
name='abc',
value=Field(stringValue='2020-02-27 00:30:15.290'),
typeHint='TIMESTAMP',
).valid_value
== '2020-02-27 00:30:15.290'
)
assert (
SqlParameter(
name='abc', value=Field(stringValue='00:30:15.290'), typeHint='TIME'
).valid_value
== '00:30:15.290'
)
assert (
SqlParameter(
name='abc', value=Field(stringValue='2020-02-27'), typeHint='DATE'
).valid_value
== '2020-02-27'
)
def test_from_value() -> None:
assert Field.from_value('str') == Field(stringValue='str')
assert Field.from_value(123) == Field(longValue=123)
assert Field.from_value(1.23) == Field(doubleValue=1.23)
assert Field.from_value(True) == Field(booleanValue=True)
assert Field.from_value(False) == Field(booleanValue=False)
assert Field.from_value(b'bytes') == Field(blobValue=b64encode(b'bytes'))
assert Field.from_value(None) == Field(isNull=True)
class JavaUUID:
def __init__(self, val: str):
self._val: str = val
def __str__(self) -> str:
return self._val
uuid = 'e9e1df6b-c6d3-4a34-9227-c27056d596c6'
assert Field.from_value(JavaUUID(uuid)) == Field(stringValue=uuid)
class PGobject:
def __init__(self, val: str):
self._val: str = val
def __str__(self) -> str:
return self._val
assert Field.from_value(PGobject("{}")) == Field(stringValue="{}")
class BigInteger:
def __init__(self, val: int):
self._val: int = val
        def __str__(self) -> str:
return self._val
assert Field.from_value(BigInteger("55")) == Field(longValue=55)
class Dummy:
pass
with pytest.raises(Exception):
Field.from_value(Dummy())
|
from time import time as time_now
from PyQt4 import QtCore
from PySide import QtGui
import dictionaries.session as session
import dictionaries.constants as cs
import dictionaries.menus as menus
import interface.auxiliary_functions as auxi
HLINE1 = 5 * "|"
HLINE2 = "\n" + 9 * HLINE1 + "\n"
################################################################################
class VirtualInstrumentPanel(QtGui.QWidget):
"""
When a VIP is instantiated, e.g. from the VIP_main.py, this class is called
all it requires all other VIP modules. This file consists of:
- Initialization of data and building of the GUI
- The definition of the VIP Application Programming Interface (API)
"""
def __init__(self):
"""
Call 4 different subroutines that do the following:
- Initialize several variables and dictionaries
- Build all the GUI tabs with their layouts specifications
- Build and specify the layout of the main VIP window itself
- Initialize all the GUI class instances, such as e.g. the plot window
Finally, create and set the layout of the VIP main window.
"""
### Print a bunch of "|"'s so that we see the VIP initialization in the
### editor/terminal.
print HLINE2+HLINE1
### 'super(CLASSNAME, self)' returns the parent class. The VIP class is
### a child of QtGui.QWidget and we ought to call its __init__ method.
super(VirtualInstrumentPanel, self).__init__()
self.__initialize_session_handles()
self.__initialize_content_handles()
from interface.session_widgets import _build_all
_build_all(self)
self.__initialize_GUI_windows()
### Create and set the horizontal Box (containing the 3 columns of the main window)
hBox = self._make_main_hBox()
self.setLayout(hBox)
### Set the style as specified in the constants.py file
self.__adopt_style()
print "\n\nPlease ignore all the possible layout and stylesheet " + \
"complainsts from the Python Qt package.\n\n"
def __initialize_session_handles(self):
"""Initialize several variables and dictionaries"""
########## measurement
### This '_TIC' time value is merely used to compute the runtime.
self._TIC = time_now()
### Many QWidget windows are given the following attribute to make it
### possible to exclude them form the screenshot routine.
self._B_can_make_screenshot = True
        ### The following boolean enables breaking from the measurement loop:
### The event associated with the STOP button in the GUI sets it to 'True'.
self.Bpy_break_loop = False
        ### If the following boolean is set to 'False', the first sweep will never be repeated.
### (See measurement file)
self.Bpy_redo_sweep = True
        ### If the following boolean is set to 'True', the VIP can't be closed manually.
self.B_cannot_be_closed = False
### The result handle is used to store results before they are written
### to a text file.
        ### The following is a dictionary of booleans that can be associated
### with particular session keys.
self.B_auxiliary = {'Freq. trace' : {'R_freq_start' : False
,'R_freq_stop' : False
}
,'Time trace' : {'R_freq_start' : False
,'R_freq_stop' : False
}
}
self.result = "INIT"
### This is the internal handle for storing all of the VIP-GUI's current settings.
import copy
self._session = copy.deepcopy(session.default)
###
self._sessions_local = {str(k) : copy.deepcopy(session.default) for k in range(cs.SESSION_DICT_INDICES)}
for k in ['default', 'test_1', 'test_2']:
self._sessions_local[k] = copy.deepcopy(session.default)
        ### This is the handle for all the instrument driver class instances.
### Given the connect checkboxes of the respective instruments are checked,
### the instruments drivers are looked up in the driver dict and assigned
### when the CONNECT button is pressed.
self.instruments = {instr_name : "INIT <"+instr_name+"> driver" for instr_name in session.instr_list}
### The VIP has 8 plots, and they are rendered in a plot canvas class instance
### which is assigned to this handle in the 'build_all' call
self.Canvas = {sk : {} for sk in sorted(session.Plot.keys())}
### The VIP sweep_tracker attribute is created here, which, e.g. in
### scripts can be used to keep track in which point of the measurment
### for-loop you are.
self.reset_sweep_tracker()
### Create a handle with 1000 or so HBoxes with an empty text label
def _text_box_generator(n, text):
for _ in range(n):
HBox = QtGui.QHBoxLayout()
HBox.addWidget(QtGui.QLabel(text))
yield HBox
self._blanks = _text_box_generator(1000, " ")
import dictionaries.plot_data as plot_data
### This handle contains plot data and the default is taken from the
### plot_data folder.
self.plot_data = plot_data.default_data
def __initialize_content_handles(self):
""""Initialize dictionaries that are filled with data when 'build_all' is called.
Here are some shorts for Qwidgets class instances that are more or
less standard. I use them as keys for dictionaries of widgets.
* QW_KEYS, QW_KEYS_plus, Legend:
lb ... label (Static text on the GUI surface that can not be edited by the user.)
cb ... checkbox (A GUI checkbox, an object that can be in one of two states.)
dm ... dropdown menu (An object that can be in one of finitely many states.)
bn ... button (A GUI button that can be clicked)
le ... line edit (A GUI text field that can be edited)
tb ... tab QWidget
qw ... QWidget parent class. (The above are children of that one.)
My convention for setting keys that are edited by such widgets is
            that I start them with a capital letter that hints at the range of
            values (it's like a type). For example:
'R_freq_start', 'N_sweep_points', 'F_unit_freq'
We have
S, B, F, N and R
For singleton, boolean, finite set, natural number and real number.
* CONTENT_KEYS, Legend:
events ... functions that are called when some widget is triggered
(e.g. when a button is pressed or some line edit field is changed)
captions ... captions that are required for some widgets
(e.g. the text that's written on a button or next to a checkbox)
cb_vals ... the two values associated with a checked or un-checked
checkbox (e.g. ON and OFF, EXT and INT, etc.)
dm_vals ... List of values of a dropdown menu
(e.g. FREQUENCY_UNITS = ['Hz', 'kHz', 'MHz', 'GHz'], as defined in
the constants.py file)
"""
QW_KEYS = ['le', 'cb', 'dm', 'bn', 'lb']
QW_KEYS_plus = ['tb', 'qw']
CONTENT_KEYS = ['events', 'captions', 'cb_vals', 'dm_vals']
TAB_KEYS = session.default.keys()
### These dictionaries are filled with data later
self.content = {k : {sk : {} for sk in TAB_KEYS} for k in CONTENT_KEYS}
self.auxiliary_le = {sk : {} for sk in TAB_KEYS}
self._qWidgets = {k : {} for k in QW_KEYS+QW_KEYS_plus}
for sk in TAB_KEYS:
for k in QW_KEYS:
self._qWidgets[k][sk] = {}
self._qWidgets['qw'][sk] = QtGui.QWidget()
self._qWidgets['tb'][sk] = QtGui.QTabWidget()
### Create all the tab widgets from the session structure and group them
        ### accordingly. Here 'sup_cla' denotes the super classes, which are actually
        ### not part of the default dictionary, but rather of the more detailed
        ### 'Tree', see the session.py file in the dictionaries folder.
for sup_cla in session.Tree.keys():
            ### The '_Experiment' vbox widgets are not grouped into a tab anywhere
if sup_cla == '_Experiment':
pass
else:
self._qWidgets['tb'][sup_cla] = QtGui.QTabWidget()
for cla in session.Tree[sup_cla].keys():
self._qWidgets['tb'][cla] = QtGui.QTabWidget()
for sk in session.Tree[sup_cla][cla].keys():
self._qWidgets['tb'][cla].addTab(self._qWidgets['qw'][sk], sk)
                    ### Create the measurement instrument class widget here:
if sup_cla in session.instr_fine_grained.keys():
self._qWidgets['tb'][sup_cla].addTab(self._qWidgets['tb'][cla], cla)
################################################################################
def __initialize_GUI_windows(self):
"""Build the 4 kinds of QWidget windows loaded from the 'widgets' folder.
        When the VIP is open, they are always there and can be opened from the
        GUI with their respective buttons on the VIP main window.
        Assign a reference keyword to all plot columns.
"""
self._ProgressBar = QtGui.QProgressBar(self)
from widgets.FeedbackWindow_Qwidget import FeedbackWindow
from widgets.ScriptsWindow_Qwidget import ScriptsWindow
from widgets.OptionsWindow_Qwidget import OptionsWindow
from widgets.InvisibleWindow_Qwidget import InvisibleWindow
from widgets.PlotsWindow_Qwidget import PlotsWindow
self._FeedbackWindow = FeedbackWindow(self)
self.ScriptsWindow = ScriptsWindow(self)
self._OptionsWindow = OptionsWindow(self)
self._InvisibleWindow = InvisibleWindow(self)
self._PlotsWindow_12 = PlotsWindow(self, ['Plot_column_1', 'Plot_column_2'])
self._PlotsWindow_34 = PlotsWindow(self, ['Plot_column_3', 'Plot_column_4'])
### The self._NotesWindow widget is actually always freshly instantiated
### when the "Notes" button is clicked.
def _make_main_hBox(self):
"""Build the layout of the main VIP widget window.
        A QHBoxLayout instance is a GUI surface to which one can add other
        widgets, horizontally, one after the other.
"""
########## hBox_progress_bar
hBox_progress_bar = QtGui.QHBoxLayout()
### Pop a blank line widget and add it to the hBox
hBox_progress_bar.addLayout(self._blanks.next())
### Add the VIP's QProgressBar to the hBox
### It's going to be used to visualize progress in the measurement loop
### in the 'measurement' file, in the 'interface' folder.
hBox_progress_bar.addWidget(self._ProgressBar)
hBox_progress_bar.addLayout(self._blanks.next())
########## hBox_trace_lb
text = "From trace:"
trace_lb = QtGui.QLabel(text)
trace_lb.setFont(QtGui.QFont(cs.FONT, cs.FONTSIZE+1))
trace_lb.adjustSize() ### This line may be redundant.
### We add a second label in small font that has explanatory function.
text = "(Sweep 1 done by measurement instrument itself)"
trace_lb_info = QtGui.QLabel(text)
trace_lb_info.setFont(QtGui.QFont(cs.FONT, cs.FONTSIZE-4))
trace_lb_info.adjustSize() ### This line may be redundant.
hBox_trace_lb = QtGui.QHBoxLayout()
hBox_trace_lb.addWidget(trace_lb)
hBox_trace_lb.addWidget(trace_lb_info)
        ### addStretch pushes the first two widgets in the box to the very top
### and makes it so that the box rescales
hBox_trace_lb.addStretch(1)
########## hBox_point_label
text = "(Otherwise only script)"
point_label_info = QtGui.QLabel(text)
point_label_info.setFont(QtGui.QFont(cs.FONT, cs.FONTSIZE-4))
point_label_info.adjustSize() ### This line may be redundant.
hBox_point_label = QtGui.QHBoxLayout()
### The boolean 'B_during_sweep' value determines if a measurement
### should be done during the sweep. Deactivating the measurement allows
### for merely sweeping over scripts.
hBox_point_label.addWidget(self._qWidgets['cb']['Meas_main']['B_during_sweep_1'])
hBox_point_label.addWidget(point_label_info)
hBox_point_label.addStretch(1)
########## vBoxs
### Create and fill the three columns (as vBoxes) of the VIP main window.
### The VIP._qWidgets values were created in the '.build_all' call.
### The 'vBoxs' dictionary here is a local one (as opposed to an attribute
### of the VIP) and so the three keys are throwaway references that are
        ### not used later. This is why I use a prefix underscore.
vBox_KEYS = ['_control', '_measure', '_dosweep']
vBoxs = {}
for k in vBox_KEYS:
vBoxs[k] = QtGui.QVBoxLayout()
vBoxs[k].addStretch(.1)
vBoxs['_control'].addWidget(self._qWidgets['qw']['Results'])
vBoxs['_control'].addStretch(.1)
vBoxs['_control'].addWidget(self._qWidgets['qw']['Session'])
vBoxs['_control'].addStretch(.1)
vBoxs['_control'].addWidget(self._instrument_qw_box)
vBoxs['_control'].addLayout(self._blanks.next())
vBoxs['_control'].addStretch(.1)
vBoxs['_control'].addWidget(self._qWidgets['tb']['_Source_Instr'])
vBoxs['_measure'].addLayout(self._blanks.next())
vBoxs['_measure'].addWidget(self._qWidgets['tb']['Sweep_1'])
vBoxs['_measure'].addLayout(hBox_point_label)
vBoxs['_measure'].addWidget(self._qWidgets['tb']['Points'])
vBoxs['_measure'].addStretch(.1)
vBoxs['_measure'].addLayout(hBox_trace_lb)
vBoxs['_measure'].addWidget(self._qWidgets['tb']['Traces'])
vBoxs['_measure'].addStretch(.1)
vBoxs['_measure'].addWidget(self._qWidgets['tb']['_Meas_Instr'])
vBoxs['_dosweep'].addLayout(self._blanks.next())
vBoxs['_dosweep'].addStretch(.1)
vBoxs['_dosweep'].addLayout(hBox_progress_bar)
vBoxs['_dosweep'].addStretch(.1)
vBoxs['_dosweep'].addWidget(self._qWidgets['qw']['Sweep'])
vBoxs['_dosweep'].addLayout(self._blanks.next())
vBoxs['_dosweep'].addWidget(self._qWidgets['tb']['Sweep_2'])
vBoxs['_dosweep'].addWidget(self._qWidgets['tb']['Sweep_3'])
vBoxs['_dosweep'].addStretch(.1)
vBoxs['_dosweep'].addWidget(self._popup_window_qw_box)
########## hBox
### Create a large hBox and put the three vBoxes into it.
hBox = QtGui.QHBoxLayout()
hBox.addLayout(self._blanks.next())
hBox.addStretch(1)
for k in vBox_KEYS:
hBox.addLayout(vBoxs[k])
hBox.addLayout(self._blanks.next())
hBox.addStretch(1)
return hBox
def __adopt_style(self):
        ### At initialization, Qt might complain that it can't parse the stylesheets
        ### of some widgets. I assume it has to do with the buttons or so, but
        ### it doesn't matter.
self._popup_window_qw_box.setStyleSheet(cs.STYLE_control)
for k in ['Results', 'Session']:
self._qWidgets['qw'][k].setStyleSheet(cs.STYLE_control)
self._instrument_qw_box.setStyleSheet(cs.STYLE_instruments)
for k in ['_Source_Instr', '_Meas_Instr']:
self._qWidgets['tb'][k].setStyleSheet(cs.STYLE_instruments)
self._qWidgets['qw']['Sweep'].setStyleSheet(cs.STYLE_sweeps)
for k in ['Sweep_1', 'Sweep_2', 'Sweep_3']:
self._qWidgets['tb'][k].setStyleSheet(cs.STYLE_sweeps)
self.setStyleSheet(cs.STYLE_VIP)
### Load some settings from the constants.py file and set them for the VIP
self.move(*cs.MOVE_VIP)
self.resize(*cs.RESIZE_VIP)
self.setWindowTitle(cs.WINDOW_TITLE_VIP)
################################################################################ VIP-API
def get(self, sk, k = None):
"""Return thesession dictonaries d for a session key 'sk' or a
particular value of it, d[k].
"""
try:
r = self._session[sk]
if k != None:
r = r[k]
return r
except KeyError as exception:
print "!!! (.get):\n{0}\n.get will liekly return 'None'.".format(exception)
def set(self, sk, settings):
"""Using the dictionary 'settings', update the dictonary d with the
session key 'sk'.
"""
### settings has format {'k1' : v1, 'k2' : v2, ...}
### where the values v must be convertible to strings for the command
for k, v in settings.iteritems():
v = str(v)
self._session[sk][k] = v
### Note: There are some session values that are not loaded
### into any widget.
try:
if k in self._qWidgets['le'][sk].keys():
self._qWidgets['le'][sk][k].setText(v)
elif k in self._qWidgets['dm'][sk].keys():
auxi.set_dm(self._qWidgets['dm'][sk][k], v)
self._session[sk][k] = v
elif k in self._qWidgets['cb'][sk].keys():
on_off_pair = self.content['cb_vals'][sk][k]
auxi.set_cb(self._qWidgets['cb'][sk][k], on_off_pair, v)
#auxi.sourced_print("("+sk+"), ignored "+k+", "+v)
except KeyError:
auxi.sourced_print("("+sk+"), KeyError exception for "+k)
def adopt_session(self, session):
for sk, settings in session.iteritems():
self.set(sk, settings)
print sk
print len(settings)
#vip.update_figures()
auxi.sourced_print("called.")
def is_connected(self, instr_name):
"""Return a boolean 'B_is_connected' that is true if the insturment with
name 'instr_name' has its connect checkbox set positive, and if its
corresponding VIP attribute '.instruments[instr_name]' has a driver
associated with it. This is done by checking if the attribute has a
method 'get_session_index()'.
"""
        ### Assume the instrument is not connected, then check whether we're wrong.
B_is_connected = False
if self.get(instr_name, 'B_connect') == 'TRY':
try:
session_index = self.instruments[instr_name].get_session_index()
message = instr_name+": "+"is connected! Session index: "+str(session_index)
self.GUI_feedback(message)
### If the routine got thus far, we can be confident that the
### instrument is connected.
B_is_connected = True
except AttributeError:
### This exception will be thrown if 'self.instruments[instr_name]'
### does not have a method 'get_session_index()'
message = instr_name+": is not connected! (get_session_index fail)"
print message
else:
pass # message = instr_name+": Connection checkbox not even set to TRY."
return B_is_connected
def GUI_feedback(self, message):
"""Write 'message' to the Widget window, 'self._FeedbackWindow'"""
self._FeedbackWindow.update(self, message)
        ### Also print the 'message' to the editor/terminal. The function
### 'sourced_print' is just 'print' with a header that tells us the
### function it has been called from (in this case, 'GUI_feedback')
auxi.sourced_print(message)
def runtime(self):
"""Return the runtime of the VIP, i.e. the passed time since 'self._TIC'
has been initialized via the VIP's __init__ method."""
runtime_string_seconds = auxi.toc(self._TIC)
return "\nVIP session runtime:\n{0}\n".format(runtime_string_seconds)
def update_figures(self, dim = None):
"""The Canvas class instances have a 'update_figure' method.Depending
on the session values and the argument 'dim', update the VIP plots.
"""
for sk in session.Plot.keys():
if self.get(sk, 'B_keep_updated') == 'ON':
if (dim == None) or (dim == '2d_data'):
self.Canvas[sk]['2d_data'].update_figure(sk, self)
if (dim == None) or (dim == '3d_data'):
self.Canvas[sk]['3d_data'].update_figure(sk, self)
### Finally, process all open Qt operations.
QtCore.QCoreApplication.processEvents()
#auxi.sourced_print("called.")
def reset_sweep_tracker(self):
"""Set the VIP 'attribute self.sweep_tracker' to a dictionary that has
the value '0' for all three measurement loop references, 1, 2 and 3.
"""
self.sweep_tracker = {k_data : {str(i) : 0 for i in [1, 2, 3]} for k_data in menus.DATA_SET_KEYS}
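        ### The resulting structure is e.g. {data_key : {'1' : 0, '2' : 0, '3' : 0}, ...}
        ### with one entry per key in menus.DATA_SET_KEYS.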
#auxi.sourced_print("called.")
################################################################################ closeEvent
def closeEvent(self, evnt):
"""'closeEvent' is the name of a QWidget build in method that is called
whenever the widget is closed. In the case that the VIP is closed, we
want to close all other open widgets as well.
"""
self._PlotsWindow_12.close()
self._PlotsWindow_34.close()
self._FeedbackWindow.close()
self.ScriptsWindow.close()
self._OptionsWindow.close()
try:
self._NotesWindow.close()
except AttributeError:
pass
### Actively remove from instrument to be connected:
#for sk in ['H3344_1']:
# self.set(sk, {'B_connect' : 'DONT'})
#import interface.session_events as events
#events.bn_connect_to_lab(self)
try:
from drivers.DriverH3344_AWG import DriverH3344 as _DriverH3344
_DriverH3344._clear_channels()
except (NotImplementedError, ImportError, TypeError) as exception: ### except WindowsError
print "! (VirtualInstrumentPanel, closeEvent) Exception for DriverH3344_AWG:"
print str(exception)
### The boolean attribute VIP.B_cannot_be_closed defined above in this
        ### class makes it so that we can choose the VIP window to be indestructible.
if self.B_cannot_be_closed is True:
evnt.ignore()
self.setWindowState(QtCore.Qt.WindowMinimized)
message = "The VIP is protected from being closed.\nRestart the Kernel to kill it."
else:
super(VirtualInstrumentPanel, self).closeEvent(evnt)
message = "\nVIP was closed.\n"
### When the VIP closes, we report back with the total runtime.
message += self.runtime()
self.GUI_feedback(message)
### Print a bunch of "|"'s so that we see we are again allowed to use
### the editor/terminal.
print HLINE1+HLINE2
|
from mock import patch
from pinkbelt.messages import post_message
@patch("pinkbelt.messages.slack_post_message")
def test_post_message(mock_post_message):
post_message('test', 'room')
    assert mock_post_message.called
    mock_post_message.assert_called_with('test', 'room')
|
## Word a10n (abbreviation)
## 6 kyu
## https://www.codewars.com//kata/5375f921003bf62192000746
import re
def abbreviate(s):
s_list = re.split('([\W\d_])', s)
end_list = []
for i in s_list:
if i:
if len(i) >= 4:
end_list.append(f'{i[0]}{len(i)-2}{i[-1]}')
elif len(i) < 4:
end_list.append(i)
    return ''.join(end_list)
|
###########################################
#IMPORTING
#Imports OS
import os
#Needed for sleep
import time
#GUI
import tkinter as tk
#Import filename
from tkinter import filedialog
#Import filename
from tkinter import messagebox
#Import py file (needs __init__.py)
from Gifhandler import *
#Tabs
from tkinter import ttk
#To run shit in background
import subprocess
#Startup
import winshell
#To close
import sys
###########################################
#CHOOSING FOLDERS
#Finds folder location
cwd = os.getcwd()
#saves root cwd
rootcwd=cwd
#Goes to script folder
os.chdir(cwd)
#Checks datafolder
if not os.path.isdir(cwd+'/'+'data'):
os.mkdir(cwd+'/'+'data')
###########################################
#Loading
#Main window
top=tk.Tk()
#Hide
top.withdraw()
#Loadwindow
loadwindow = tk.Toplevel(top)
#Icon
loadwindow.iconbitmap('gifs/zergyicon.ico')
#Setting color
loadwindow.configure(background='gold')
#Title
loadwindow.title('LOADING')
#Fixing picture canvas
loadcanvas=tk.Canvas(loadwindow,width=250,height=250,background='gold')
loadcanvas.pack()
#Open gif
loadanimation=Gifhandler(loadwindow,loadcanvas,'gifs/running.gif',40)
loadanimation.animate()
#Loadingtext
loadstring=tk.StringVar()
loadtext = tk.Label(master=loadwindow,textvariable=loadstring)
loadtext.configure(background='gold')
loadtext.config(font=("Georgia", 26))
loadtext.pack()
loadstring.set('Loading: \nDefinitions')
###########################################
#Definitions
#Wait function that keeps tkinter running while waiting, #MASTAAAPIECE
def waitfunc(delay):
_waitwindow=tk.Toplevel()
_waitwindow.withdraw()
_starttime=time.time()
_stoptime=_starttime
while (int(_stoptime)-int(_starttime))<delay:
_waitwindow.update()
_stoptime=time.time()
_waitwindow.destroy()
def runzergy():
global cwd
url=E1T1.get()
f=open('data/url.txt','w')
f.write(url)
f.close()
#Close GUI
#top.destroy()
if 'Yes' in Settings[3]:
startupfolder = winshell.startup()
winshell.CreateShortcut(Path=(startupfolder+'/'+'Zergyshortcut.lnk'), Target=(cwd+'/'+'HTMLfetch.exe'))
try:
subprocess.Popen(['HTMLfetch.exe'],stdout=subprocess.PIPE,creationflags=0x08000000)
except:
os.system('HTMLfetch.py')
def quitcommand():
try:
os._exit(0)
except:
quit()
def delete_startup():
global cwd
startupfolder = winshell.startup()
os.remove(startupfolder+'/'+'Zergyshortcut.lnk')
B2T4.config(text='Deleted',bg='deep sky blue')
waitfunc(1)
B2T4.config(text='Delete autostart',bg='red')
def traincommand():
Question=E1T3.get()
Answer=E2T3.get()
f=open('data/Training.txt','a')
f.write(Question+';'+Answer+'\n')
f.close()
B1T3.config(text='Learned!',bg='deep sky blue')
waitfunc(1)
B1T3.config(text='Train',bg='orange')
def opentrain():
os.system('start '+r'data/Training.txt')
def writedetails():
global E1T2,E2T2,E3T2,E4T2,E5T2,E6T2,E7T2,E8T2
varlist=[E1T2,E2T2,E3T2,E4T2,E5T2,E6T2,E7T2,E8T2]
Details=[]
for i in range(0,8):
_temp=varlist[i].get()
Details.append(_temp)
f=open('data/Details.txt','w')
for i in Details:
f.write(i+'\n')
f.close()
B1T2.config(text='Written!',bg='deep sky blue')
waitfunc(1)
B1T2.config(text='Write to file',bg='orange')
def writesettings():
global font
global O1T4_stringvar,S1T4, S2T4,O2T4_stringvar,O3T4_stringvar
varlist=[O1T4_stringvar,S1T4,S2T4,O2T4_stringvar,O3T4_stringvar]
Settings=[]
for i in varlist:
Settings.append(i.get())
f=open('data/Settings.txt','w')
for i in Settings:
f.write(str(i)+'\n')
f.close()
B1T4.config(text='Written!',font=font,bg='deep sky blue')
waitfunc(1)
B1T4.config(text='Write to file',font=font,bg='orange')
def runwaitgif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/zergysmall.gif',40)
mainanimation.animate()
def runbuggif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/bug.gif',30)
mainanimation.animate_noloop()
def runtraingif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/train.gif',80)
mainanimation.animate()
def rungotitgif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/gotit.gif',30,200,130)
mainanimation.animate_noloop()
def runboomgif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/boom.gif',40,200,130)
mainanimation.animate()
def runburrowgif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/burrow.gif',30,100,130)
mainanimation.animate()
def checktab():
global ttkNote
currenttab=ttkNote.index('current')
if currenttab==0 or currenttab==1:
runwaitgif()
elif currenttab==2:
runtraingif()
elif currenttab==3:
runbuggif()
elif currenttab==4:
rungotitgif()
elif currenttab==5:
runburrowgif()
###########################################
#GUI window
#Update
waitfunc(0.1)
loadstring.set('Loading: \nMain window')
#Icon
top.iconbitmap('gifs/zergyicon.ico')
#Setting color
top.configure(background='gold')
#Title
top.title('Zergy')
#Fixing picture canvas (will load later)
topcanvas=tk.Canvas(top,width=250,height=250,background='gold')
topcanvas.pack()
#Makes it not resizable
top.resizable(width=False, height=False)
###########################################
#Create tabs
#Tabs
ttkNote=ttk.Notebook(top)
frame1=tk.Frame(ttkNote,bg='gold')
frame2=tk.Frame(ttkNote,bg='gold')
frame3=tk.Frame(ttkNote,bg='gold')
frame4=tk.Frame(ttkNote,bg='gold')
frame5=tk.Frame(ttkNote,bg='gold')
frame6=tk.Frame(ttkNote,bg='gold')
ttkNote.add(frame1,text='Start')
ttkNote.add(frame2,text='Details')
ttkNote.add(frame3,text='Training')
ttkNote.add(frame4,text='Settings')
ttkNote.add(frame5,text='Info')
ttkNote.add(frame6,text=' ') #Hidden
tab1=ttkNote.index('current')
ttkNote.pack()
###########################################
#Load default text
#Update
waitfunc(0.1)
loadstring.set('Loading: \nData')
#DONT FORGET DEFAULT TEXT IN ENTRIES!
#Details
try:
Details=[]
f=open('data/Details.txt','r')
_lines=f.readlines()
Details.append([i.replace('\n','') for i in _lines])
Details=Details[0] #Because list in list
f.close()
except:
Details=[['']*9]
Details=Details[0]
#Settings
try:
Settings=[]
f=open('data/Settings.txt','r')
_lines=f.readlines()
Settings.append([i.replace('\n','') for i in _lines])
Settings=Settings[0] #Because list in list
f.close()
except:
Settings=['Manual: Fill',5,0,'No','No']
###########################################
#Entries
#Update
waitfunc(0.1)
loadstring.set('Loading: \nMain GUI')
font=('Georgia',12)
foreground='black'
background='gold'
#TAB1
E1T1 = tk.Entry(frame1)
L1T1 = tk.Label(frame1,text='URL:',font=font,fg=foreground,bg=background)
B1T1 = tk.Button(frame1, text="Start", command=runzergy,bg='orange')
B2T1 = tk.Button(frame1, text="Quit", command=quitcommand,bg='red')
#TAB2
for i in range(1,9):
vars()['E'+str(i)+'T2']=tk.Entry(frame2)
vars()['E'+str(i)+'T2'].insert(0,Details[i-1])
L1T2 = tk.Label(frame2,text='First Name:',font=font,fg=foreground,bg=background)
L2T2 = tk.Label(frame2,text='Surname:',font=font,fg=foreground,bg=background)
L3T2 = tk.Label(frame2,text='ZIP code:',font=font,fg=foreground,bg=background)
L4T2 = tk.Label(frame2,text='Address:',font=font,fg=foreground,bg=background)
L5T2 = tk.Label(frame2,text='Mailing Address:',font=font,fg=foreground,bg=background)
L6T2 = tk.Label(frame2,text='"Ovvenamn":',font=font,fg=foreground,bg=background)
L7T2 = tk.Label(frame2,text='Shipping:',font=font,fg=foreground,bg=background)
L8T2 = tk.Label(frame2,text='Amount:',font=font,fg=foreground,bg=background)
B1T2 = tk.Button(frame2, text="Write to file", font=font, command=writedetails,bg='orange')
#TAB3
E1T3 = tk.Entry(frame3)
E2T3 = tk.Entry(frame3)
L1T3 = tk.Label(frame3,text='Question:',font=font,fg=foreground,bg=background)
L2T3 = tk.Label(frame3,text='Answer:',font=font,fg=foreground,bg=background)
B1T3 = tk.Button(frame3, text="Train", command=traincommand,bg='orange')
B2T3 = tk.Button(frame3, text="Open file", font=font, command=opentrain,bg='deep sky blue')
#TAB4
O1T4_tuple = ("Manual: Open","Manual: Fill","Manual: Force", "Auto: Fill", "Auto: Force")
O1T4_stringvar=tk.StringVar(frame4)
O2T4_tuple = ("No","Yes")
O2T4_stringvar=tk.StringVar(frame4)
O3T4_tuple = ("No","Yes")
O3T4_stringvar=tk.StringVar(frame4)
O1T4 = tk.OptionMenu(frame4,O1T4_stringvar, *O1T4_tuple)
O1T4.config(bg=background,fg=foreground,font=font)
O1T4_stringvar.set(Settings[0])
O2T4 = tk.OptionMenu(frame4,O2T4_stringvar, *O2T4_tuple)
O2T4.config(bg=background,fg=foreground,font=font)
O2T4_stringvar.set(Settings[3])
O3T4 = tk.OptionMenu(frame4,O3T4_stringvar, *O3T4_tuple)
O3T4.config(bg=background,fg=foreground,font=font)
O3T4_stringvar.set(Settings[4])
S1T4 = tk.Scale(frame4,from_=0, to=30,orient='horizontal',font=font,fg=foreground,bg=background)
S2T4 = tk.Scale(frame4,from_=0, to=30,orient='horizontal',font=font,fg=foreground,bg=background)
S1T4.set(Settings[1])
S2T4.set(Settings[2])
L1T4 = tk.Label(frame4,text='Function:',font=font,fg=foreground,bg=background)
L2T4 = tk.Label(frame4,text='Refresh:',font=font,fg=foreground,bg=background)
L3T4 = tk.Label(frame4,text='Delay:',font=font,fg=foreground,bg=background)
L4T4 = tk.Label(frame4,text='Autoresume:',font=font,fg=foreground,bg=background)
L5T4 = tk.Label(frame4,text='Autotrain:',font=font,fg=foreground,bg=background)
B1T4 = tk.Button(frame4, text="Write to file", font=font, command=writesettings,bg='orange')
B2T4 = tk.Button(frame4, text="Delete autostart", font=('Georgia',9), command=delete_startup,bg='red')
#TAB5
infotext="\nHey!\nLet go of the bug!\n\n\n\nThis program has been written\nby Timothy Bergström and\nis used to autofill Googleforms"
L1T5 = tk.Label(frame5,text=infotext,font=font,fg=foreground,bg=background)
#TAB6 (Hidden)
infotext="\nDon't put a new \nbug in the program!!\n\n\n\nSigh..."
L1T6 = tk.Label(frame6,text=infotext,font=font,fg=foreground,bg=background)
#Packing Tab1
L1T1.pack()
E1T1.pack()
B1T1.pack()
B2T1.pack(side='bottom')
#Packing Tab2
for i in range(1,9):
    vars()['L'+str(i)+'T2'].grid(row=i,column=0)
for i in range(1,9):
    vars()['E'+str(i)+'T2'].grid(row=i,column=1)
B1T2.grid(row=9,column=1)
#Packing Tab3
L1T3.pack()
E1T3.pack()
L2T3.pack()
E2T3.pack()
B1T3.pack()
B2T3.pack(side='bottom')
#Packing Tab4
O1T4.grid(row=1,column=1)
S1T4.grid(row=2,column=1)
S2T4.grid(row=3,column=1)
O2T4.grid(row=4,column=1)
O3T4.grid(row=5,column=1)
L1T4.grid(row=1,column=0)
L2T4.grid(row=2,column=0)
L3T4.grid(row=3,column=0)
L4T4.grid(row=4,column=0)
L5T4.grid(row=5,column=0)
B1T4.grid(row=6,column=1)
B2T4.grid(row=6,column=0)
#Packing Tab5
L1T5.pack()
#Packing Tab6
L1T6.pack()
#Update
loadstring.set('Loading: \nFinished')
waitfunc(0.5)
#Reveal
top.deiconify()
#Open gif
mainanimation=Gifhandler(top,topcanvas,'gifs/zergysmall.gif',40)
mainanimation.animate()
#checktabs AWW YISSS, IT TOOK SO LONG TO FIND A SOLUTION
ttkNote.bind('<<NotebookTabChanged>>',lambda event: checktab())
#Destroy
loadanimation.stop_animation()
loadwindow.destroy()
#loop
top.mainloop()
|
from abc import abstractmethod
from interfaces.private.canvas_component import CanvasComponent
class CanvasPublicInterface(CanvasComponent):
"""All methods on this interface are to be implemented in thi API implementation"""
@abstractmethod
def __init__(self, size):
"""
        :param size: tuple of ints (x,y)
"""
@abstractmethod
def add_drawable(self, drawable):
"""Add a new drawable to the Canvas
:param drawable: An instance of class drawable
:return: None
"""
@abstractmethod
def remove_drawable(self, drawable):
"""Remove a drawable from the canvas
:param drawable: An instance of class drawable currently associated with the canvas.
:return:
"""
@abstractmethod
def drawables(self):
"""Return an iterator over all drawables associated with this canvas"""
@abstractmethod
def setup(self):
"""All user code needed to be run before the animation starts such as initializing variables
This function should be implemented by the API user
:return None:
"""
@abstractmethod
def update_frame(self):
"""All user code needed to be run to update the animation frame.
This function should be implemented by the API user
:return None:
"""
@abstractmethod
def display(self, fps):
"""Display the canvas and run the animation. NOTE this is a blocking function
:param fps: frames per second (integer)
:return: None
"""
@abstractmethod
def sleep(self, time):
"""wait for time specified in seconds."""
def get_position_owners(self, position):
"""Return the drawable at the given position will return self (canvas) if no drawable if present at given point.
:param position: tuple of ints (x,y)
:return: instance of Drawable OR self
"""
def size(self):
"""Return the size of the canvas
:return: tuple of ints (x,y)
"""
|
#!/usr/bin/python3
"""
urbandict.py - urban dictionary module
author: mutantmonkey <[email protected]>
"""
from tools import GrumbleError
import web
import json
import re
API_URL = "http://api.urbandictionary.com/v0/define?term={0}"
WEB_URL = "http://www.urbandictionary.com/define.php?term={0}"
def get_definition(phenny, word, to_user=None):
data = web.get(API_URL.format(web.quote(word)))
data = json.loads(data)
results = data['list']
if not results:
phenny.say("No results found for {0}".format(word))
return
result = results[0]
url = WEB_URL.format(web.quote(word))
response = "{0} - {1}".format(result['definition'].strip()[:256], url)
phenny.say(response, target=to_user)
def urbandict(phenny, input):
""".urb <word> - Search Urban Dictionary for a definition. (supports pointing)"""
word = input.group(1)
if not word:
phenny.say(urbandict.__doc__.strip())
return
to_nick = input.group(2)
get_definition(phenny, word, to_user=to_nick)
urbandict.name = 'urb'
urbandict.rule = (['urb'], r'(.*)')
urbandict.example = '.urb troll'
urbandict.point = True
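# The attributes above follow the phenny module convention: 'rule' is the trigger
# pattern, 'example' is shown in the help output, and 'point' appears to enable the
# "nick: .urb word" pointing syntax that urbandict3 below handles explicitly.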
def urbandict3(phenny, input):
nick, _, __, word = input.groups()
get_definition(phenny, word, nick)
urbandict3.rule = r'(\S*)(:|,)\s\.(urb)\s(.*)'
urbandict3.example = 'svineet: .urb seppuku'
if __name__ == '__main__':
print(__doc__.strip())
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import re
str = '''Train Epoch: 1 Train Iteration: 260 Time 1.609s / 20iters, (0.080) Data load 0.036s / 20iters, (0.001794)
Learning rate = [0.1, 0.1] Loss = {ce_loss: 2.3057, loss: 2.3117}
'''
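# The same sample log line is parsed twice below: the first pattern captures the raw
# "Loss = {...}" string as 'train_loss', the second digs the numeric 'loss' value out of it.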
print("#+++++++++++#")
RE_CLS_IC_TRAIN = re.compile(r'Train Epoch: (?P<epoch>\d+)\t'
r'Train Iteration: (?P<iters>\d+)\t'
r'Time (?P<batch_time_sum>\d+\.?\d*)s / (?P<batch_iters>\d+)iters, '
r'\((?P<batch_time_avg>\d+\.?\d*)\)\t'
r'Data load (?P<date_time_sum>\d+\.?\d*)s / (?P<_batch_iters>\d+)iters, '
r'\((?P<date_time_avg>\d+\.?\d*)\)\n'
r'Learning rate = (?P<learning_rate>.*)\t'
r'Loss = (?P<train_loss>.*)\n')
res = RE_CLS_IC_TRAIN.search(str)
if res:
print(type(res.groupdict()['epoch']))
print(res.groupdict()['iters'])
print(res.groupdict()['batch_time_sum'])
print(res.groupdict()['batch_iters'])
print(res.groupdict()['batch_time_avg'])
print(res.groupdict()['date_time_sum'])
print(res.groupdict()['date_time_avg'])
print(res.groupdict()['learning_rate'])
print(res.groupdict()['train_loss'])
print("#+++++++++++#")
RE_CLS_IC_TRAIN = re.compile(r'Train Epoch: (?P<epoch>\d+)\t'
r'Train Iteration: (?P<iters>\d+)\t'
r'Time (?P<batch_time_sum>\d+\.?\d*)s / (?P<batch_iters>\d+)iters, '
r'\((?P<batch_time_avg>\d+\.?\d*)\)\t'
r'Data load (?P<date_time_sum>\d+\.?\d*)s / (?P<_batch_iters>\d+)iters, '
r'\((?P<date_time_avg>\d+\.?\d*)\)\n'
r'Learning rate = (?P<learning_rate>.*)\t'
r'Loss = .*loss: (?P<train_loss>\d+\.?\d*).*\n')
res = RE_CLS_IC_TRAIN.search(str)
if res:
print(res.groupdict()['train_loss'])
|
import os
import sys
os.environ['SERVICE'] = 'aws-cd-pipeline'
os.environ['STACK'] = 'localstack'
os.environ['STAGE'] = 'localtest'
# manipulating sys.path to make importing inside tests work because ¯\_(ツ)_/¯
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(here, '..', 'src'))
|
import pybullet as p
#import pybullet_data as pd
import os
import wavefront as wf
import util
import optimization as opt
import numpy as np
import argparse, sys
parser = argparse.ArgumentParser(description=__doc__, formatter_class=
argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--input_obj", "-i", type=str, dest="input_obj", default="mesh.obj", help="File name of the .obj input")
parser.add_argument("--output_csv", "-o", type=str, dest="output_csv", default="result.csv", help="File name of the .csv output (after optimization)")
parser.add_argument("--approx_csv", "-a", type=str, dest="approx_csv", default="quick_approx.csv", help="File name of a quick approximation of the decomposition (before optimization)")
parser.add_argument("--convex_obj", "-c", type=str, dest="convex_obj", default="parts.obj", help="File name of the intermediate .obj convex shapes")
parser.add_argument("--convex_log", type=str, dest="convex_log", default="log.txt", help="File name of the intermediate convex decomposition logs")
parser.add_argument("--interpolate_ratio", "-r", type=float, dest="ratio", default=0.5,
help="Must be in [0,1]; set it close to 1 to make the final output spheres large, close to 0 to make them small")
parser.add_argument("--voxel_resolution", "-v", type=int, dest="voxel_resolution", default=50000,
help="The resolution at which the convex decomposition takes place; larger number usually leads to more final spheres")
parser.add_argument("--max_hull_verts", type=int, dest="max_hull_verts", default=64, help="The max number of vertices of one convex hull")
parser.add_argument("--budget", "-b", type=int, dest="budget", default=5, help="The target number of spheres we want")
args = parser.parse_args(sys.argv[1:])
original_meshes = wf.load_obj(args.input_obj)
assert len(original_meshes) == 1, "This script handles OBJ with one mesh group only."
mesh = original_meshes[0]
p.connect(p.DIRECT)
############## IF OTHER CONVEX DECOMPOSITION PARAM NEEDED TO CHANGE: PLEASE DIRECTLY CHANGE THEM HERE #######################
p.vhacd(args.input_obj, args.convex_obj, args.convex_log, concavity=0.0025, alpha=0.04,
resolution=args.voxel_resolution, maxNumVerticesPerCH=args.max_hull_verts)
#############################################################################################################################
parts = wf.load_obj(args.convex_obj)#, triangulate=True)
parts.append(util.sliceMesh(parts[0], [0,0,1], [0,0,40]))
parts[0] = util.sliceMesh(parts[0], [0,0,-1], [0,0,40])
parts.append(util.sliceMesh(parts[0], [0,0,-1], [0,0,-40]))
parts[0] = util.sliceMesh(parts[0], [0,0,1], [0,0,-40])
xyzr = np.zeros((len(parts), 4))
part_id = 0
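# Quick per-part approximation: for each convex piece, interpolate (by --interpolate_ratio)
# between a large sphere derived from its bounding box and a small sphere inscribed
# around its vertex centroid, and store the result as one (x, y, z, r) row.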
for part in parts:
bounding_box = util.bbox(part.vertices)
big_sphere = util.box2ball(bounding_box)
mesh_center = util.coord_avg(part.vertices)
small_sphere = util.inscribedSphereViaPointMesh(mesh_center, part)
decomp_sphere = util.interpolateSphere(big_sphere, small_sphere, args.ratio)
xyzr[part_id,:3] = decomp_sphere.center
xyzr[part_id,-1] = decomp_sphere.radius
part_id += 1
np.savetxt(args.approx_csv, xyzr, header = "x,y,z,r", delimiter=",")
# For each mesh vertex, the index of the sphere it got assigned to.
assign_list = util.findClosestSphere(mesh.vertices, xyzr)
opt_spheres = opt.optimizeAsgdSpheresFromVert(mesh.vertices, xyzr, assign_list)
np.savetxt(args.output_csv, opt_spheres, header = "x,y,z,r", delimiter=",")
|
'''
Title : Loops
Subdomain : Introduction
Domain : Python
Author : Kalpak Seal
Created : 28 September 2016
'''
# Enter your code here. Read input from STDIN. Print output to STDOUT
n = int(raw_input())
for i in range(0, n):
    print (i ** 2)
|
from django.db import models
from django.shortcuts import reverse
import uuid
class Snippet(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
title = models.CharField(max_length=50)
code = models.TextField()
langs = [('Text', 'None'),
('Python', 'Python'),
('JavaScript', 'Javascript'),
]
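    # Each tuple is (value stored in the DB, human-readable label), as Django expects for 'choices'.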
lang = models.CharField(max_length=18, choices=langs, default='Text')
date = models.DateTimeField(auto_now_add=True)
    class Meta:
db_table = "Snippets"
verbose_name = "Snippet"
verbose_name_plural = "Snippets"
def __str__(self):
return self.id.hex
|
from abc import ABC
from typing import List, Tuple
import numpy as np
from absl import logging
from xain.types import KerasWeights
from .evaluator import Evaluator
class Aggregator(ABC):
def __init__(self):
pass
def aggregate(self, thetas: List[Tuple[KerasWeights, int]]) -> KerasWeights:
raise NotImplementedError()
class IdentityAgg(Aggregator):
def aggregate(self, thetas: List[Tuple[KerasWeights, int]]) -> KerasWeights:
assert len(thetas) == 1
return thetas[0][0]
class FederatedAveragingAgg(Aggregator):
def aggregate(self, thetas: List[Tuple[KerasWeights, int]]) -> KerasWeights:
theta_list = [theta for theta, _ in thetas]
weighting = np.array([num_examples for _, num_examples in thetas])
return federated_averaging(theta_list, weighting)
class EvoAgg(Aggregator):
def __init__(self, evaluator: Evaluator):
super().__init__()
self.evaluator = evaluator
def aggregate(self, thetas: List[Tuple[KerasWeights, int]]) -> KerasWeights:
weight_matrices = [theta for theta, num_examples in thetas]
return evo_agg(weight_matrices, self.evaluator, False)
def federated_averaging(
thetas: List[KerasWeights], weighting: np.ndarray
) -> KerasWeights:
assert weighting.ndim == 1
assert len(thetas) == weighting.shape[0]
theta_avg: KerasWeights = thetas[0]
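    # Note: the first theta is reused (and mutated in place) as the running weighted sum.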
for w in theta_avg:
w *= weighting[0]
# Aggregate (weighted) updates
for theta, update_weighting in zip(thetas[1:], weighting[1:]):
for w_index, w in enumerate(theta):
theta_avg[w_index] += update_weighting * w
weighting_sum = np.sum(weighting)
for w in theta_avg:
w /= weighting_sum
return theta_avg
def evo_agg(
thetas: List[KerasWeights], evaluator: Evaluator, verbose=False
) -> KerasWeights:
"""
- Init different weightings
- Aggregate thetas according to those weightings ("candidates")
- Evaluate all candidates on the validation set
- Pick (a) best candidate, or (b) average of n best candidates
"""
# Compute candidates
# TODO in parallel, do:
theta_prime_candidates = []
for i in range(3):
candidate = compute_candidate(thetas, evaluator)
if verbose:
logging.info(
"candidate {} (weighting {}): {} loss".format(
i, candidate[0], candidate[2]
)
)
theta_prime_candidates.append(candidate)
# Return best candidate
best_candidate = pick_best_candidate(theta_prime_candidates)
return best_candidate
def pick_best_candidate(candidates: List) -> KerasWeights:
_, best_candidate, best_loss, _ = candidates[0]
for _, candidate, loss, _ in candidates[1:]:
if loss < best_loss:
best_candidate = candidate
best_loss = loss
return best_candidate
def compute_candidate(
thetas: KerasWeights, evaluator: Evaluator
) -> Tuple[np.ndarray, KerasWeights, float, float]:
weighting = random_weighting(len(thetas))
    # TODO: Validate using federated_averaging instead of the not-implemented
    # weighted_federated_averaging
theta_prime_candidate = federated_averaging(thetas, weighting)
loss, acc = evaluator.evaluate(theta_prime_candidate)
return weighting, theta_prime_candidate, loss, acc
def random_weighting(num_weightings: int, low=0.5, high=1.5) -> np.ndarray:
return np.random.uniform(low=low, high=high, size=(num_weightings,))
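# Minimal sanity-check sketch (illustrative, not part of the library): federated
# averaging of two single-layer "models" with weights 1 and 3. As noted above,
# federated_averaging reuses and mutates the first theta in place.
if __name__ == "__main__":
    theta_a = [np.array([0.0, 4.0])]
    theta_b = [np.array([4.0, 8.0])]
    averaged = federated_averaging([theta_a, theta_b], np.array([1.0, 3.0]))
    print(averaged)  # expected: [array([3., 7.])]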
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email import encoders
import re
import os
def send_email(sender_mail, sender_password, mail_subject, mail_body, receiver_mail, sender_mail_site_smtp,
list_of_attachments):
# An instance of MIMEMultipart
mail_object = MIMEMultipart()
# Initializing the parameters for the mail
# Sender's email address
mail_object['From'] = sender_mail
# Receiver's email address
mail_object['To'] = receiver_mail
# Subject line
mail_object['Subject'] = mail_subject
# Mail Body content
body = mail_body
# Attach mail body content to the mail object
mail_object.attach(MIMEText(body, 'plain'))
# If list of attachments has any attachments, then attach it to mail object
if list_of_attachments:
# Attach each attachment into mail object
for file in list_of_attachments:
filename = file
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(filename, 'rb').read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(filename))
mail_object.attach(part)
# SMTP session
smtp_session = smtplib.SMTP(sender_mail_site_smtp, 587)
# TLS for security
smtp_session.starttls()
# Authentication
smtp_session.login(sender_mail, sender_password)
# Converts the Multipart mail_object into a string
text = mail_object.as_string()
# sending the mail
smtp_session.sendmail(sender_mail, receiver_mail, text)
# terminating the session
smtp_session.quit()
###################################
def get_mail_site_smtp(mail_address):
mail_sites_smtp_list = {'gmail': 'smtp.gmail.com',
'Outlook.com': 'smtp.live.com',
'Office365.com': 'smtp.office365.com',
'Yahoo Mail': 'smtp.mail.yahoo.com',
'Yahoo Mail Plus': 'plus.smtp.mail.yahoo.com',
'Yahoo UK': 'smtp.mail.yahoo.co.uk',
'Yahoo Deutschland': 'smtp.mail.yahoo.com',
'Yahoo AU/NZ': 'smtp.mail.yahoo.com.au',
'O2': 'smtp.o2.ie',
'O2.uk': 'smtp.o2.co.uk',
'AOL.com': 'smtp.aol.com',
'AT&T': 'smtp.att.yahoo.com',
'NTL @ntlworld.com': 'smtp.ntlworld.com',
'Orange': 'smtp.orange.net',
'Orange.uk': 'smtp.orange.co.uk',
'Wanadoo UK': 'smtp.wanadoo.co.uk',
'Hotmail': 'smtp.live.com',
'O2 Online Deutschland': 'securesmtp.t-online.de',
'1&1 (1and1)': 'smtp.1and1.com',
'1&1 Deutschland': 'smtp.1und1.de',
'Comcast': 'smtp.comcast.net',
'zoho Mail': 'smtp.zoho.com',
'Mail.com': 'smtp.mail.com',
'GMX.com': 'smtp.gmx.com',
'Net@ddress by USA.NET': 'smtp.postoffice.net'}
# Source: https://www.arclab.com/en/kb/email/list-of-smtp-and-pop3-servers-mailserver-list.html
mail_pattern = r'([a-zA-Z0-9]+)@([a-zA-Z0-9]+).([a-zA-Z0-9]{3})'
match = re.search(mail_pattern, mail_address)
sender_mail_site = match.group(2)
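    # group(2) is the provider part of the address (e.g. 'gmail' for a @gmail.com
    # address), which is then matched against the keys of the dict above.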
print('Can you please type the email site you are using from below list of sites?')
if sender_mail_site in mail_sites_smtp_list.keys():
for mail_site in mail_sites_smtp_list.keys():
if sender_mail_site in mail_site:
print(mail_site)
choice1 = input(': ')
return mail_sites_smtp_list[choice1]
else:
print(mail_sites_smtp_list.keys())
print('Type "N" if you do not see your site listed')
choice2 = input(': ')
if choice2 in ['N', 'n']:
return False
###################################
# Mail Address
print("Enter details:")
MAIL_ADDRESS = input('Your mail address: ')
mail_address_valid = False
while not mail_address_valid:
    mail_address_pattern = r'^[a-z0-9A-Z]+[._]?[a-z0-9A-Z]+[@]\w+[.]\w{2,3}$'
    if re.search(mail_address_pattern, MAIL_ADDRESS):
        mail_address_valid = True
    else:
        print('Please enter in the format: [email protected]. Eg. [email protected]')
        MAIL_ADDRESS = input('Your mail address: ')
# Password
PASSWORD = input('Password: ')
print('## LOGIN Initialized ##')
# Subject
SUBJECT = input('Subject line: ')
# Mail body content
print('Reading mail body from mail_body.txt file ...')
mail_body_file = open('mail_body.txt', 'r')
BODY = mail_body_file.read()
mail_body_file.close()
# Preview
print('Mail preview: ')
print('#########################')
print('Subject: ' + SUBJECT)
print(BODY)
print('#########################')
# Mail recipients/receivers
print('Reading mail recipients from recipients.txt file ...')
recipients = open('recipients.txt', 'r')
ALL_RECIPIENTS = recipients.readlines()
recipients.close()
print('Total recipients found in recipients.txt: ' + str(len(ALL_RECIPIENTS)))
# Mail SMTP
MAIL_SMTP = get_mail_site_smtp(MAIL_ADDRESS)
# Attachments:
ATTACHMENTS = list()
choice = input('Do you want to attach files to the mail? Y/N: ')
if choice in ['Y', 'y']:
num_files = int(input('Number of files: '))
for n in range(num_files):
filefull = input('Enter file\'s absolute path: ')
ATTACHMENTS.append(filefull)
###################################
proceed = input('Proceed? Y/N: ')
if proceed in ['Y', 'y']:
email_sent_count = 0
for RECIPIENT in ALL_RECIPIENTS:
send_email(MAIL_ADDRESS, PASSWORD, SUBJECT, BODY, RECIPIENT, MAIL_SMTP, ATTACHMENTS)
email_sent_count += 1
print(str(email_sent_count) + '. ' + RECIPIENT.strip() + ' --> Sent')
print('TOTAL MAILS SENT: ' + str(email_sent_count))
else:
print('Closing program ...')
|
import sys
from validator.submain import populate_chrome_manifest
from validator.rdf import RDFParser
from validator.xpi import XPIManager
from validator.errorbundler import ErrorBundle
from validator.outputhandlers.shellcolors import OutputHandler
import validator.testcases.regex as regex
def _do_test(path, test, failure=True,
require_install=False, set_type=0,
listed=False, xpi_mode="r"):
package_data = open(path, "rb")
package = XPIManager(package_data, mode=xpi_mode, name=path)
err = ErrorBundle()
if listed:
err.save_resource("listed", True)
# Populate in the dependencies.
if set_type:
err.detected_type = set_type # Conduit test requires type
if require_install:
err.save_resource("has_install_rdf", True)
rdf_data = package.read("install.rdf")
install_rdf = RDFParser(err, rdf_data)
err.save_resource("install_rdf", install_rdf)
populate_chrome_manifest(err, package)
test(err, package)
print err.print_summary(verbose=True)
if failure:
assert err.failed()
else:
assert not err.failed()
return err
class TestCase(object):
def setUp(self):
self.err = None
self.is_jetpack = False
self.is_bootstrapped = False
self.detected_type = None
self.listed = True
def reset(self):
"""
Reset the test case so that it can be run a second time (ideally with
different parameters).
"""
self.err = None
def setup_err(self, for_appversions=None):
"""
Instantiate the error bundle object. Use the `instant` parameter to
have it output errors as they're generated. `for_appversions` may be set
to target the test cases at a specific Gecko version range.
An existing error bundle will be overwritten with a fresh one that has
the state that the test case was setup with.
"""
self.err = ErrorBundle(instant=True,
for_appversions=for_appversions or {},
listed=self.listed)
self.err.handler = OutputHandler(sys.stdout, True)
if self.is_jetpack:
self.err.metadata["is_jetpack"] = True
if self.is_bootstrapped:
self.err.save_resource("em:bootstrap", True)
if self.detected_type is not None:
            self.err.detected_type = self.detected_type
def assert_failed(self, with_errors=False, with_warnings=None):
"""First, asserts that the error bundle registers a failure
(recognizing whether warnings are acknowledged). Second, if
`with_errors`is True, the presence of errors is asserted. If it is not
true (default), it is tested that errors are not present. If
`with_warnings` is not None, the presence of warnings is tested just
like `with_errors`.
"""
assert self.err.failed(
fail_on_warnings=with_warnings or with_warnings is None), \
"Test did not fail; failure was expected."
if with_errors:
assert self.err.errors, "Errors were expected."
elif self.err.errors:
raise AssertionError("Tests found unexpected errors: %s" %
self.err.print_summary(verbose=True))
if with_warnings is not None:
if with_warnings:
assert self.err.warnings, "Warnings were expected."
elif self.err.warnings:
raise ("Tests found unexpected warnings: %s" %
self.err.print_summary())
def assert_notices(self):
"""Assert that notices have been generated during the validation
process.
"""
assert self.err.notices, "Notices were expected."
def assert_passes(self, warnings_pass=False):
"""Assert that no errors have been raised. If `warnings_pass` is True,
also assert that there are no warnings.
"""
        assert not self.err.failed(fail_on_warnings=not warnings_pass), \
("Test was intended to pass%s, but it did not." %
(" with warnings" if warnings_pass else ""))
def assert_silent(self):
"""
Assert that no messages (errors, warnings, or notices) have been
raised.
"""
assert not self.err.errors, 'Got these: %s' % self.err.errors
assert not self.err.warnings, 'Got these: %s' % self.err.warnings
assert not self.err.notices, 'Got these: %s' % self.err.notices
assert not any(self.err.compat_summary.values()), \
"Found compatibility messages."
def assert_got_errid(self, errid):
"""
Assert that a message with the given errid has been generated during
the validation process.
"""
assert any(msg["id"] == errid for msg in
(self.err.errors + self.err.warnings + self.err.notices)), \
"%s was expected, but it was not found." % repr(errid)
class RegexTestCase(TestCase):
"""
A helper class to provide functions useful for performing tests against
regex test scenarios.
"""
def run_regex(self, input, is_js=False):
"""Run the standard regex tests for non-JavaScript input."""
if self.err is None:
self.setup_err()
if not is_js:
input = '<input onclick="%s" />' % input
else:
input = "'use strict';\n%s" % input
regex.run_regex_tests(input, self.err, "foo.txt", is_js=is_js)
def run_js_regex(self, input):
"""Run the standard regex tests for JavaScript input."""
if self.err is None:
self.setup_err()
regex.run_regex_tests(input, self.err, "foo.txt", is_js=True)
class MockZipFile:
def namelist(self):
return []
class MockXPI:
def __init__(self, data=None, subpackage=False):
if not data:
data = {}
self.zf = MockZipFile()
self.data = data
self.subpackage = subpackage
self.filename = "mock_xpi.xpi"
def test(self):
return True
def info(self, name):
return {"name_lower": name.lower(),
"extension": name.lower().split(".")[-1]}
def __iter__(self):
def i():
for name in self.data.keys():
yield name
return i()
def __contains__(self, name):
return name in self.data
def read(self, name):
return open(self.data[name]).read()
|
from src.handler import inc
def test_answer():
assert inc(3) == 4
|
"""Packager for cloud environment."""
from setuptools import setup, find_packages
setup(
name='preprocess',
version='1.0.0',
packages=find_packages(),
install_requires=[
'tensorflow',
'numpy',
],
)
|
import csv
import logging
import math
import os
from datetime import datetime
import pytest
from brainscore.submission.database import connect_db
from brainscore.submission.evaluation import run_evaluation
from brainscore.submission.models import Score, Model, Submission
from tests.test_submission.test_db import clear_schema, init_user
logger = logging.getLogger(__name__)
#
# Integration tests for the submission systems, executing 4 submissions:
# 1: ID:33 Working submission, executing one benchmark on Alexnet (zip + json)
# 2: ID:34 Rerunning Alexnet on another benchmark (only json)
# 3: ID:35 Failing installation submission (zip + json)
# 4: ID:36 Submission is installable, but model (Alexnet) is not scoreable (zip + json)
#
@pytest.mark.memory_intense
@pytest.mark.private_access
class TestIntegration:
    database = 'brainscore-ohio-test'
@classmethod
def setup_class(cls):
logger.info('Connect to database')
        connect_db(TestIntegration.database)
clear_schema()
def setup_method(self):
logger.info('Initialize database')
init_user()
def teardown_method(self):
logger.info('Clean database')
clear_schema()
def compare(self, a, b):
return abs(a - b) <= 0.0001
def test_evaluation(self, tmpdir):
working_dir = str(tmpdir.mkdir('sub'))
config_dir = str(os.path.join(os.path.dirname(__file__), 'configs/'))
        run_evaluation(config_dir, working_dir, 33, TestIntegration.database, models=['alexnet'],
benchmarks=['dicarlo.MajajHong2015.IT-pls'])
with open('result_33.csv') as results:
csv_reader = csv.reader(results, delimiter=',')
next(csv_reader) # header row
result_row = next(csv_reader)
assert result_row[0] == 'alexnet'
assert result_row[1] == 'dicarlo.MajajHong2015.IT-pls'
assert self.compare(float(result_row[2]), 0.5857491098187586)
assert self.compare(float(result_row[3]), 0.5079816726934638)
assert self.compare(float(result_row[4]), 0.003155449372125895)
scores = Score.select()
assert len(scores) == 1
# successful score comment should inform about which layers were used for which regions
assert scores[0].comment.startswith("layers:")
def test_rerun_evaluation(self, tmpdir):
working_dir = str(tmpdir.mkdir('sub'))
config_dir = str(os.path.join(os.path.dirname(__file__), 'configs/'))
submission = Submission.create(id=33, submitter=1, timestamp=datetime.now(),
model_type='BaseModel', status='running')
model = Model.create(name='alexnet', owner=submission.submitter, public=False,
submission=submission)
with open(f'{config_dir}submission_34.json', 'w') as rerun:
rerun.write(f"""{{
"model_ids": [{model.id}], "user_id": 1}}""")
        run_evaluation(config_dir, working_dir, 34, TestIntegration.database,
benchmarks=['dicarlo.Rajalingham2018-i2n'])
with open('result_34.csv') as results:
csv_reader = csv.reader(results, delimiter=',')
next(csv_reader) # header row
result_row = next(csv_reader)
assert result_row[0] == 'alexnet'
assert result_row[1] == 'dicarlo.Rajalingham2018-i2n'
assert self.compare(float(result_row[2]), 0.25771746331458695)
assert self.compare(float(result_row[3]), 0.3701702418190641)
assert self.compare(float(result_row[4]), 0.011129032024657565)
def test_failure_evaluation(self, tmpdir):
working_dir = str(tmpdir.mkdir('sub'))
config_dir = str(os.path.join(os.path.dirname(__file__), 'configs/'))
with pytest.raises(Exception):
            run_evaluation(config_dir, working_dir, 35, TestIntegration.database, models=['alexnet'],
benchmarks=['dicarlo.Rajalingham2018-i2n'])
def test_model_failure_evaluation(self, tmpdir):
# os.environ['RESULTCACHING_DISABLE'] = 'brainscore.score_model,model_tools'
working_dir = str(tmpdir.mkdir('sub'))
config_dir = str(os.path.join(os.path.dirname(__file__), 'configs/'))
        run_evaluation(config_dir, working_dir, 36, TestIntegration.database, models=['alexnet'],
benchmarks=['movshon.FreemanZiemba2013.V1-pls'])
with open('result_36.csv') as results:
csv_reader = csv.reader(results, delimiter=',')
next(csv_reader) # header row
result_row = next(csv_reader)
assert result_row[0] == 'alexnet'
assert result_row[1] == 'movshon.FreemanZiemba2013.V1-pls'
assert result_row[2] == '0'
assert result_row[3] == '0'
model = Model.get()
score = Score.get(model=model)
assert score.comment is not None # When there's a problem, the comment field contains an error message
        # os.environ['RESULTCACHING_DISABLE'] = '0'
|
import marshmallow as ma
class FooSchema(ma.Schema):
name = ma.fields.String(metadata={'title': 'foo name', 'description':
'foo name'})
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 29 16:02:56 2019
@author: hkf
"""
import xml.etree.ElementTree as ET
import os
import cv2
from tools_hu.utils.utils import bboxes_iou
from tools_hu.utils.Code_dictionary import CodeDictionary
import numpy as np
import random
def cut_defect(images, annotations, output_dir, code_dict=None, cut_normal=False):
for root_dir, _, files in os.walk(images):
for file in files:
if not (file.endswith('jpg') or (file.endswith('JPG'))):
continue
img_path = os.path.join(root_dir, file)
xml = file.replace('jpg', 'xml')
xml_path = os.path.join(annotations, xml)
try:
tree = ET.parse(xml_path)
except Exception as e:
print('no file named {}'.format(xml_path))
continue
root = tree.getroot()
objs = root.findall('object')
img = cv2.imread(img_path)
H, W, D = img.shape
start_id = 0
gt_bbox = []
for obj in objs:
category = obj[0].text
if code_dict is not None:
category = str(code_dict.code2id(category))
bbox = [int(float(obj[4][i].text)) for i in range(4)]
cut = img[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
gt_bbox.append(np.array(bbox))
save_dir_code = os.path.join(output_dir, category)
if not os.path.exists(save_dir_code):
os.makedirs(save_dir_code)
cut_name = '__'.join(
[file[:-4], category, str(bbox[0]), str(bbox[1]), str(bbox[2]), str(bbox[3])]) + '.jpg'
start_id += 1
cut_path = os.path.join(save_dir_code, cut_name)
cv2.imwrite(cut_path, cut)
if cut_normal:
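                # Sample a few random background ("normal") crops whose IoU with every
                # ground-truth defect box is below 0.3, using the anchor scales/ratios
                # defined in the __main__ block.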
samples = 5
save_dir_code = os.path.join(output_dir, 'normal')
if not os.path.exists(save_dir_code):
os.makedirs(save_dir_code)
for _ in range(samples):
size = random.sample(ANCHOR_SCALE, 1)[0]
ratio = random.sample(ANCHOR_RATIO, 1)[0]
width = int(size*ratio)
height = int(width/ratio)
if width >= W or height >= H:
continue
xmin = random.randint(0, W - width)
ymin = random.randint(0, H - height)
xmax = xmin + width
ymax = ymin + height
bbox = [xmin, ymin, xmax, ymax]
if np.all(bboxes_iou(np.array(bbox), np.array(gt_bbox))<0.3):
cut = img[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
cut_name = '__'.join(
[file[:-4], 'normal', str(bbox[0]), str(bbox[1]), str(bbox[2]), str(bbox[3])]) + '.jpg'
cut_path = os.path.join(save_dir_code, cut_name)
cv2.imwrite(cut_path, cut)
else:
print('skip')
continue
if __name__ == '__main__':
defect_dir = r'D:\Project\chongqing_contest\data\chongqing1_round1_train1_20191223\top\train'
xml_dir = r'D:\Project\chongqing_contest\data\chongqing1_round1_train1_20191223\top\train'
save_dir = r'D:\Project\chongqing_contest\data\chongqing1_round1_train1_20191223\top\cut'
category_file = r'D:\Project\chongqing_contest\data\chongqing1_round1_train1_20191223\top\classes.txt'
id_file = r'D:\Project\chongqing_contest\data\chongqing1_round1_train1_20191223\top\id.txt'
cd = CodeDictionary(category_file, id_file)
ANCHOR_SCALE = [32, 64, 128, 256]
ANCHOR_RATIO = [0.2, 0.5, 1.0, 2.0, 5.0]
cut_defect(defect_dir, xml_dir, save_dir, cd, cut_normal=True)
|
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from fuzzywuzzy import fuzz, process
from nltk.tokenize import wordpunct_tokenize
from pandas_profiling import ProfileReport
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
import re
stop_words = set(stopwords.words('english')) # stop words list
sns.set_style('whitegrid')
IT_stop = stop_words.copy()
IT_stop.discard('it')
def fuzzy_match(city,matchs_list):
"""returns the the most likely matching value if not, it returns the city_name asis"""
match = process.extractOne(city, matchs_list)
if match[1] > 60:
return match[0]
else:
return city
def parse_html(html_doc):
"""returns a string of parsed html with all stop words removed"""
warnings.filterwarnings("ignore")
try:
soup = BeautifulSoup(html_doc, 'html.parser')
list_of_words = [i for i in wordpunct_tokenize(
re.sub(r'\d+|[^\w\s]', '', (soup.text.lower()))) if i not in stop_words ]
return ' '.join(map(lambda x: '%s' % x, list_of_words))
except TypeError:
return np.NaN
def clean_text(row, tech_list):
"""returns a string of parsed html with all stop words removed"""
row = str(row)
try:
list_of_words = [i for i in wordpunct_tokenize(
re.sub(r'\d+|[^\w\s]', ' ', (row.lower()))) if i in tech_list]
astring = ' '.join(map(lambda x: '%s' % x, list_of_words))
return astring
except TypeError:
return np.NaN
def mean_exper(row):
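    # Extract the first one or two integers from a free-text experience field
    # (e.g. "3-5 years") and return their mean; a single number is returned as an int.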
if fuzz.partial_ratio(row,['123456789']) > 0:
try:
_min = list(re.findall('\d+',row))[0]
except IndexError:
return np.nan
try:
_max = list(re.findall('\d+',row))[1]
except IndexError:
return int(_min)
return (int(_min)+int(_max))/2
def clean_expr_years(row):
if fuzz.partial_ratio(row,['123456789']) > 0:
try:
_min = list(re.findall('\d+',row))[0]
except IndexError:
return np.nan
try:
_max = list(re.findall('\d+',row))[1]
except IndexError:
return _min
return '{}-{}'.format(_min,_max)
def min_max_salary(to_mach,thresh=60):
listo = []
for i in data.displayed_job_title:
if fuzz.partial_ratio(to_mach,i) > thresh:
listo.append(i)
sub3 = data[data.displayed_job_title.isin(listo)]
_shape = sub3.shape
_min = sub3.salary_min.mean()
_max = sub3.salary_max.mean()
return """based on {} results the min salary is {} and the max is {} for jobs the contains {} keyword""".format(_shape[0],_min,_max,to_mach)
def rec(job,num,match_list):
matches = process.extract(query=job,limit=num, choices=match_list, scorer=fuzz.partial_ratio)
return pd.DataFrame(matches).ix[:,0]
def job_plot(data,variable,cat_num=10):
"""this function takes a categorical variable and the dataframe it's in and the number of levels
and it returns a barplot visualization """
my_colors = [(x/12.0, x/25.0, 0.5) for x in range(cat_num)]
return data[variable].value_counts().head(cat_num).plot(kind='bar',
figsize=(15,6),
color=my_colors,
title = 'the most frequent {} classes of the {} variable'.format(cat_num,variable))
|
"""
Copyright 2019 Faisal Thaheem
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Ugly hack to allow absolute import from the root folder
# whatever its name is. Please forgive the heresy.
if __name__ == "__main__" and __package__ is None:
from sys import path
from os.path import dirname as dir
path.append(dir(path[0]))
__package__ = "plateclassifier"
from shared.amqp import ThreadedAmqp
import shared.utils as utils
from shared.obj_detector import ObjDetector
import pprint
import yaml
import threading
import sys
import numpy as np
import time
import os
import argparse as argparse
import json
import queue
import uuid
import logging
import pickle
import traceback
import signal
from pymongo import MongoClient
from skimage.transform import resize
import cv2
#create logger
logger = logging.getLogger('plateclassifier.service')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('plateclassifier.service.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('[%(levelname)1.1s %(asctime)s] %(message)s',"%Y-%m-%d %H:%M:%S")
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
class PlateClassifier():
pc_model, pc_classes, pc_graph, pc_sess = None,None,None,None
_consumer = None
_publisher = None
def __init__(self, args):
self.pc_model, self.pc_classes, self.pc_graph, self.pc_sess = utils.loadModel(config['model']["modelfile"], config["model"]['modelLoss'])
brokerUrl = config['broker']['uri']
logger.info("Using broker url [{}]".format(brokerUrl))
self._consumer = ThreadedAmqp()
self._consumer.callbackEvents.on_message += self.newImageQueued
self._consumer.init(
brokerUrl,
consumerMode=True,
exchange=config['broker']['consumeFrom']['exchange'],
exchangeType=config['broker']['consumeFrom']['exchangeType'],
routingKey=config['broker']['consumeFrom']['routingKey'],
queueName=config['broker']['consumeFrom']['queueName'],
)
self._publisher = ThreadedAmqp()
self._publisher.init(
brokerUrl,
exchange=config['broker']['publishTo']['exchange'],
exchangeType=config['broker']['publishTo']['exchangeType'],
routingKey=config['broker']['publishTo']['routingKey'],
consumerMode=False
)
logger.info("Init complete")
def classifyPlate(self, img):
plate_img_with_black_bg = utils.overlayImageOnBlackCanvas(img)
plate_img_gs = utils.convertToGrayscaleForClassification(plate_img_with_black_bg)
plate_img_expanded = np.expand_dims(plate_img_gs, axis=0)
with self.pc_graph.as_default():
with self.pc_sess.as_default():
predictions = self.pc_model.predict(plate_img_expanded)
max_score_index = np.argmax(predictions[0])
return self.pc_classes[max_score_index], float(predictions[0][max_score_index])
def cleanup(self):
if self._consumer is not None:
self._consumer.stop()
if self._publisher is not None:
self._publisher.stop()
def loop_forever(self):
self._consumer.start()
self._publisher.start()
def getDbConnection(self):
client = MongoClient(config['mongo']['uri'])
#open db
if not "openlpr" in client.database_names():
logger.info("database openlpr does not exist, will be created after first document insert")
return client
def getDoc(self, docid):
client = self.getDbConnection()
db = client["openlpr"]
query = {"_id": docid}
col = db['lprevents']
document = col.find_one(query)
client.close()
return document
def updateDb(self, doc):
client = self.getDbConnection()
db = client["openlpr"]
query = {"_id": doc['_id']}
updatedDoc = { "$set": doc}
col = db['lprevents']
col.update_one(query, updatedDoc)
client.close()
def newImageQueued(self, msg):
logger.debug(msg)
try:
# load image
msg = json.loads(msg)
diskpath = os.path.join(config['storage']['path'], msg['unique_name'])
originalImage = utils.load_image_into_numpy_array(diskpath, None, False)
originalShape = originalImage.shape
logger.debug("Loaded image [{}]".format(diskpath))
msg['classifications'] = []
document = msg
# slice
plate_images = []
for i in range(0, len(document['detections']['boxes'])):
if document['detections']['scores'][i] >= config['classification']['minScore']:
                    # crop the i-th detected box (the original always indexed box [0], which looks like a bug)
                    plateImage = originalImage[
                        document['detections']['boxes'][i][0]:document['detections']['boxes'][i][2],
                        document['detections']['boxes'][i][1]:document['detections']['boxes'][i][3]
                    ]
#save this plate image to be used in ocr
filename = "{}_plate_{}.jpg".format(msg['_id'],i)
plate_images.append(filename)
filename = os.path.join(config['storage']['path'], filename)
cv2.imwrite(filename, cv2.cvtColor(plateImage.copy(), cv2.COLOR_RGB2BGR))
platetype, score = self.classifyPlate(plateImage)
else:
platetype, score = 'not classified',0.0
msg['classifications'].append(
{
'platetype': platetype,
'score': score
}
)
logger.info("[{}] classified as [{}] with confidence [{}]".format(msg['_id'],platetype, score))
            #TODO: fix later, possible bug: number of plate images may not equal number of classifications/detections
msg['plate_imgs'] = plate_images
# save to db
self.updateDb(msg)
# dispatch to mq
self._publisher.publish_message(msg)
except:
logger.error("An error occurred: ", exc_info=True)
def signal_handler(sig, frame):
try:
logger.info("Ctrl-c pressed, stopping")
if detector is not None:
detector.cleanup()
except:
logger.error("An error occurred: ", exc_info=True)
if __name__ == '__main__':
global args
global config
global detector
ap = argparse.ArgumentParser()
ap.add_argument("-cf", "--config.file", default='plateclassifier.yaml',
help="Config file describing service parameters")
args = vars(ap.parse_args())
#handle ctrl-c
signal.signal(signal.SIGINT, signal_handler)
with open(args["config.file"]) as stream:
try:
            if os.getenv('PRODUCTION') is not None:
                config = yaml.load(stream, Loader=yaml.SafeLoader)['prod']
            else:
                config = yaml.load(stream, Loader=yaml.SafeLoader)['dev']
pprint.pprint(config)
except yaml.YAMLError as err:
logger.error("An error occurred: ", exc_info=True)
detector = PlateClassifier(args)
detector.loop_forever()
|
import time
from suvec.common.crawling import CrawlRunner
from suvec.common.postproc.data_managers.ram_data_manager import RAMDataManager
from suvec.common.postproc import ParsedProcessorWithHooks
from suvec.common.postproc.processor_hooks import HookSuccessParseNotifier
from suvec.common.top_level_types import User
from suvec.common.requesting import EconomicRequester
from suvec.common.events_tracking.terminal_events_tracker import TerminalEventsTracker
from suvec.common.postproc.data_managers.data_long_term_saver import DataLongTermSaver
from suvec.common.requesting.requested_users_storage import RequestedUsersFileStorage
from suvec.common.requesting.users_filter import DuplicateUsersFilter
from .executing.pool_executor import VkApiPoolExecutor
from .executing.async_pool_executor import AsyncVkApiPoolExecutor
from .executing.mutli_session_async_pool_executor import MultiSessionAsyncVkApiPoolExecutor
from .executing.responses_factory import AioVkResponsesFactory
from suvec.vk_api_impl.session.records_managing.records_storing import ProxyStorage, CredsStorage
from suvec.vk_api_impl.session.resource_testing import ResourceTester
from .executing.responses_factory import VkApiResponsesFactory
from .requesting import VkApiRequestsCreator
from .errors_handler import VkApiErrorsHandler
from .session.records_managing.proxy_manager import ProxyManager
from .session.records_managing.creds_manager import CredsManager
from .session import SessionManagerImpl
class VkApiCrawlRunner(CrawlRunner):
# TODO: If performance will become a problem, will need to refactor from single-user methods to batch-of-users
# methods and use multithreading
# TODO: separate creating of complex objects and runner to 2 components. First will get all settings and will have
# build methods, second will call objects, register listeners, run crawling
# TODO: need integration tests with crawl runner and mock components to test that all listen/notify connections
# are set up and work properly
def __init__(self, start_user_id: int, proxy_storage: ProxyStorage, creds_storage: CredsStorage,
long_term_save_pth: str, data_backup_path: str,
logs_pth: str = "../logs.txt",
tracker=None, requester_max_requests_per_loop=10000,
tracker_response_freq=500,
access_resource_reload_hours=1, use_async=True, nb_sessions=1,
dmp_long_term_steps=2000):
if tracker is None:
tracker = TerminalEventsTracker(log_pth=logs_pth, report_every_responses_nb=tracker_response_freq)
self.tracker = tracker
self.events_tracker = tracker
CrawlRunner.__init__(self, tracker=tracker)
requests_creator = VkApiRequestsCreator()
friends_req_storage = RequestedUsersFileStorage("./resources/checkpoints/dumped_friends_requests.txt")
groups_req_storage = RequestedUsersFileStorage("./resources/checkpoints/dumped_groups_requests.txt")
users_filter = DuplicateUsersFilter()
self.requester = EconomicRequester(
requests_creator,
friends_req_storage=friends_req_storage,
groups_req_storage=groups_req_storage,
users_filter=users_filter,
max_requests_per_call=requester_max_requests_per_loop
)
errors_handler = VkApiErrorsHandler(tracker)
proxy_manager = ProxyManager(proxy_storage, tracker,
hours_for_resource_reload=access_resource_reload_hours)
creds_manager = CredsManager(creds_storage, tracker,
hours_for_resource_reload=access_resource_reload_hours)
tester = ResourceTester(errors_handler)
self.session_manager = SessionManagerImpl(errors_handler, proxy_manager, creds_manager, tester)
if use_async:
responses_factory = AioVkResponsesFactory()
if nb_sessions == 1:
self.executor = AsyncVkApiPoolExecutor(self.session_manager, responses_factory, errors_handler)
else:
self.executor = MultiSessionAsyncVkApiPoolExecutor(self.session_manager, responses_factory,
errors_handler, nb_sessions=nb_sessions)
else:
responses_factory = VkApiResponsesFactory()
self.executor = VkApiPoolExecutor(self.session_manager, responses_factory)
long_term_saver = DataLongTermSaver(long_term_save_pth, data_backup_path)
self.data_manager = RAMDataManager(long_term_saver, dmp_long_term_every=dmp_long_term_steps)
self.parsed_processor = ParsedProcessorWithHooks(self.data_manager, tracker,
errors_handler=errors_handler)
success_request_notifier_hook = HookSuccessParseNotifier()
success_request_notifier_hook.register_request_success_listener(self.requester)
self.parsed_processor.add_process_success_hook(success_request_notifier_hook)
errors_handler.register_session_error_listener(self.session_manager)
errors_handler.register_user_unrelated_listener(self.requester)
self.continue_crawling = True
self.has_to_break_parsing = False
self.candidates = [User(id=start_user_id)]
def run(self):
while self.continue_crawling:
self.requester.add_users(self.candidates)
print("added users")
requests = self.requester.get_requests()
print("requests", len(requests))
start_execute = time.time()
parsed = self.executor.execute(requests)
print("time to get responses", time.time() - start_execute)
print("responses", len(parsed))
process_start_time = time.time()
for parsed_response in parsed:
self.parsed_processor.process(parsed_response)
process_time = time.time() - process_start_time
# TODO: deleted termination of processing in case of access error, so need to check performance drop
print("time to process", process_time)
self.candidates = self.parsed_processor.get_new_parse_candidates()
self.tracker.state_report()
self.end_loop()
def end_loop(self):
pass
def stop(self):
self.continue_crawling = False
|
def add_numbers(start,end):
total = 0
for number in range(start, end + 1):
print(number)
total = total + number
return(total)
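# Sanity check: add_numbers(start, end) computes the arithmetic series start + (start+1) + ... + end,
# so it should equal (end - start + 1) * (start + end) / 2; for example the 1..100 case is 5050
# and the 1000..5000 case is (5000 - 1000 + 1) * (1000 + 5000) // 2 = 12003000.
assert add_numbers(1, 100) == 100 * 101 // 2 == 5050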
test1 = add_numbers(1,2)
print(test1)
test2 = add_numbers(1, 100)
print(test2)
test3 = add_numbers(1000, 5000)
print(test3)
answer = add_numbers(1, 5000)
print(answer)
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Test export of PyTorch operators using ONNX Runtime contrib ops."""
import copy
import distutils.version
import io
import unittest
import numpy as np
import onnx
import parameterized
import torch
import onnxruntime
from onnxruntime.tools import pytorch_export_contrib_ops
def _torch_version_lower_than(version: str):
return distutils.version.LooseVersion(torch.__version__) < distutils.version.LooseVersion(version)
def ort_test_with_input(ort_sess, input, output, rtol, atol):
input, _ = torch.jit._flatten(input)
output, _ = torch.jit._flatten(output)
def to_numpy(tensor):
if tensor.requires_grad:
return tensor.detach().cpu().numpy()
else:
return tensor.cpu().numpy()
inputs = list(map(to_numpy, input))
outputs = list(map(to_numpy, output))
ort_inputs = dict((ort_sess.get_inputs()[i].name, input) for i, input in enumerate(inputs))
ort_outs = ort_sess.run(None, ort_inputs)
# compare onnxruntime and PyTorch results
assert len(outputs) == len(ort_outs), "number of outputs differ"
[np.testing.assert_allclose(out, ort_out, rtol=rtol, atol=atol) for out, ort_out in zip(outputs, ort_outs)]
# These set of tests verify ONNX model export and compares outputs between
# PyTorch and ORT.
class ONNXExporterTest(unittest.TestCase):
from torch.onnx.symbolic_helper import _export_onnx_opset_version
opset_version = _export_onnx_opset_version
keep_initializers_as_inputs = True # For IR version 3 type export.
def setUp(self):
torch.manual_seed(0)
pytorch_export_contrib_ops.register()
def run_test(
self,
model,
input=None,
custom_opsets=None,
batch_size=2,
rtol=0.001,
atol=1e-7,
do_constant_folding=True,
dynamic_axes=None,
test_with_inputs=None,
input_names=None,
output_names=None,
):
model.eval()
if input is None:
input = torch.randn(batch_size, 3, 224, 224, requires_grad=True)
with torch.no_grad():
if isinstance(input, torch.Tensor):
input = (input,)
# In-place operators will update input tensor data as well.
# Thus inputs are replicated before every forward call.
input_copy = copy.deepcopy(input)
output = model(*input_copy)
if isinstance(output, torch.Tensor):
output = (output,)
# export the model to ONNX
f = io.BytesIO()
torch.onnx.export(
model,
input_copy,
f,
opset_version=self.opset_version,
do_constant_folding=do_constant_folding,
keep_initializers_as_inputs=self.keep_initializers_as_inputs,
dynamic_axes=dynamic_axes,
input_names=input_names,
output_names=output_names,
custom_opsets=custom_opsets,
)
# compute onnxruntime output prediction
ort_sess = onnxruntime.InferenceSession(f.getvalue(), providers=onnxruntime.get_available_providers())
input_copy = copy.deepcopy(input)
ort_test_with_input(ort_sess, input_copy, output, rtol, atol)
# if additional test inputs are provided run the onnx
# model with these inputs and check the outputs
if test_with_inputs is not None:
for test_input in test_with_inputs:
if isinstance(test_input, torch.Tensor):
test_input = (test_input,)
test_input_copy = copy.deepcopy(test_input)
output = model(*test_input_copy)
if isinstance(output, torch.Tensor):
output = (output,)
ort_test_with_input(ort_sess, test_input, output, rtol, atol)
def test_inverse(self):
class CustomInverse(torch.nn.Module):
def forward(self, x):
return torch.inverse(x) + x
x = torch.randn(2, 3, 3)
self.run_test(CustomInverse(), x, custom_opsets={"com.microsoft": 1})
def test_gelu(self):
model = torch.nn.GELU()
x = torch.randn(3, 3)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
def test_gelu_is_fused_by_default(self):
model = torch.nn.GELU()
f = io.BytesIO()
torch.onnx.export(
model,
torch.randn(3, 3),
f,
opset_version=self.opset_version,
custom_opsets={"com.microsoft": 1},
)
f.seek(0)
onnx_model = onnx.load(f)
node = onnx_model.graph.node[0]
self.assertEqual(node.op_type, "Gelu")
self.assertEqual(node.domain, "com.microsoft")
@parameterized.parameterized.expand([("default_approximate", "none"), ("tanh_approximate", "tanh")])
@unittest.skipIf(_torch_version_lower_than("1.12"), "Gelu's approximate parameter unsupported in PyTorch < 1.12")
def test_gelu_supports_approximate_param(self, _, approximate: str):
# The approximate param was introduced in PyTorch 1.12.
# So we need to ignore the type checking when calling nn.Gelu
model = torch.nn.GELU(approximate=approximate) # type: ignore[call-arg]
x = torch.randn(3, 3)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
def test_triu(self):
for i in range(-5, 5):
class Module(torch.nn.Module):
def forward(self, input):
return input.triu(diagonal=i)
model = Module()
x = torch.randn(5, 4, 7, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(5, 4, 0, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(5, 0, 0, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
for i in range(-5, 5):
class Module2D(torch.nn.Module):
def forward(self, input):
return input.triu(diagonal=i)
model = Module2D()
x = torch.randn(4, 7, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(0, 7, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(0, 0, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
def test_tril(self):
for i in range(-5, 5):
class Module(torch.nn.Module):
def forward(self, input):
return input.tril(diagonal=i)
model = Module()
x = torch.randn(5, 4, 7, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(5, 4, 0, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(5, 0, 0, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
for i in range(-5, 5):
class Module2D(torch.nn.Module):
def forward(self, input):
return input.tril(diagonal=i)
model = Module2D()
x = torch.randn(4, 7, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(0, 7, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(0, 0, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
# opset 9 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
ONNXExporterTest_opset9_IRv4 = type(
str("TestONNXRuntime_opset9_IRv4"),
(unittest.TestCase,),
dict(ONNXExporterTest.__dict__, keep_initializers_as_inputs=False),
)
if __name__ == "__main__":
unittest.main()
|
# -*- coding: future_fstrings -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
from six.moves import shlex_quote
import utils as ut
def new_cmd(session, name, cmd, load_path, shell):
if isinstance(cmd, (list, tuple)):
cmd = " ".join(shlex_quote(str(v)) for v in cmd)
return name, "tmux send-keys -t {}:{} {} Enter".format(session, name, shlex_quote(cmd))
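# Worked example (hypothetical session/window names): the returned pair is the tmux window name
# plus the command that types the shell-quoted command into that window, e.g.
#   new_cmd('train', 'ps', ['python', 'main.py'], None, 'bash')
#   -> ('ps', "tmux send-keys -t train:ps 'python main.py' Enter")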
def create_commands(args, shell='bash'):
ut.train.prepare_dirs(args)
actual_args = ut.io.get_cmd(as_list=True)
actual_cmd = ' '.join(actual_args)
# for launching the TF workers and for launching tensorboard
base_cmd = [
'CUDA_VISIBLE_DEVICES=',
sys.executable, 'main.py',
'--load_path', args.load_path,
'--start_port', args.start_port,
'--num_gpu', ut.misc.count_gpu(),
] + actual_args
cmds_map = [
("dummy", "tmux send-keys -t {}:0 Enter".format(args.tag)),
new_cmd(args.tag, "ps", base_cmd + ["--job_name", "ps"], args.load_path, shell),
]
    if args.loss == 'l2':
        gpu_task_num = 1
    elif args.loss == 'gan':
        gpu_task_num = 2
    else:
        gpu_task_num = 0  # avoid a NameError below for other loss types
for idx in range(args.num_workers):
if idx < gpu_task_num and args.num_gpu > 0: # gpu workers
cmd = [base_cmd[0] + str(min(args.num_gpu, max(0, args.num_gpu - idx - 1)))] + base_cmd[1:]
else:
cmd = base_cmd[:]
cmd += ["--job_name", "worker", "--task", str(idx)]
cmds_map += [new_cmd(args.tag, "w-%d" % idx, cmd, args.load_path, shell)]
tmp_tb_dir = "/".join(sys.executable.split('/')[:-1])
tmp_tb_path = os.path.join(tmp_tb_dir, "tensorboard")
if os.path.exists(tmp_tb_path):
tb = tmp_tb_dir + "/tensorboard"
else:
tb = "tensorboard"
tb_args = [tb, "--logdir", args.log_dir, "--port", "12345"]
cmds_map += [new_cmd(args.tag, "tb", tb_args, args.load_path, shell)]
cmds_map += [new_cmd(args.tag, "htop", ["htop"], args.load_path, shell)]
windows = [v[0] for v in cmds_map]
notes = []
cmds = []
notes += ["Use `tmux attach -t {}` to watch process output".format(args.tag)]
notes += ["Use `tmux kill-session -t {}` to kill the job".format(args.tag)]
notes += ["Point your browser to http://localhost:12345 to see Tensorboard"]
cmds += [
# kill any process using tensorboard's port
f"kill $( lsof -i:{args.tb_port} -t ) > /dev/null 2>&1",
# kill any processes using ps / worker ports
f"kill $( lsof -i:{args.start_port}-{args.num_workers + args.start_port} -t ) > /dev/null 2>&1",
f"tmux kill-session -t {args.tag}",
f"tmux new-session -s {args.tag} -n {windows[0]} -d {shell}",
]
for w in windows[1:]:
cmds += ["tmux new-window -t {} -n {} {}".format(args.tag, w, shell)]
cmds += ["sleep 1"]
for window, cmd in cmds_map:
cmds += [cmd]
return cmds, notes
def run(args):
cmds, notes = create_commands(args)
if args.dry_run:
print("Dry-run mode due to -n flag, otherwise the following commands would be executed:")
else:
print("Executing the following commands:")
print("\n".join(cmds))
print("")
if not args.dry_run:
os.environ["TMUX"] = ""
os.system("\n".join(cmds))
print('\n'.join(notes))
if __name__ == "__main__":
from config import get_args
args = get_args()
run(args)
|
from stable_baselines_custom.deepq.policies import MlpPolicy, CnnPolicy, LnMlpPolicy, LnCnnPolicy
from stable_baselines_custom.deepq.build_graph import build_act, build_train # noqa
from stable_baselines_custom.deepq.dqn import DQN
from stable_baselines_custom.common.buffers import ReplayBuffer, PrioritizedReplayBuffer # noqa
def wrap_atari_dqn(env):
"""
wrap the environment in atari wrappers for DQN
:param env: (Gym Environment) the environment
:return: (Gym Environment) the wrapped environment
"""
from stable_baselines_custom.common.atari_wrappers import wrap_deepmind
return wrap_deepmind(env, frame_stack=True, scale=False)
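# Hedged usage sketch (assumes gym and the Atari ROMs are installed; the environment id and
# hyperparameters below are illustrative, not pinned by this package):
#   import gym
#   env = wrap_atari_dqn(gym.make('PongNoFrameskip-v4'))
#   model = DQN(CnnPolicy, env, verbose=1)
#   model.learn(total_timesteps=10000)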
|
"""This module contains all the settings for the application"""
from harvey import utils
PROXIES = {
'http': 'socks5://127.0.0.1:9050',
'https': 'socks5://127.0.0.1:9050'
}
# Crawler settings
REQUEST_CONCURRENCY = 100
REQUEST_TIMEOUT = 4
REQUEST_DOWNLOAD_TIMEOUT = 10
REQUEST_TTL = 3
REQUEST_USER_AGENT = 'harvey bot 0.0.1'
_LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'simple': {
'format': '%(levelname)s [PID:%(process)d] %(asctime)s %(name)s: %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'filename': 'harvey.log',
'maxBytes': 1024 * 1024 * 10,
'backupCount': 5,
'formatter': 'simple'
}
},
'loggers': {
'root': {
'level': 'WARNING',
'handlers': ['console']
},
'gevent_utils': {
'level': 'DEBUG',
'handlers': ['console']
},
'harvey': {
'level': 'DEBUG',
'handlers': ['console']
}
}
}
try:
from local_settings import *
except ImportError:
pass
if 'LOGGING' not in locals():
LOGGING = {}
LOGGING = utils.update(_LOGGING, LOGGING)
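# Hedged example of a local_settings.py override (illustrative values only): any name defined
# there shadows the defaults above, and a LOGGING dict is merged into _LOGGING through
# utils.update rather than replacing it wholesale.
#
#   PROXIES = {}                 # e.g. disable the Tor SOCKS proxy locally
#   REQUEST_CONCURRENCY = 10
#   LOGGING = {'loggers': {'harvey': {'level': 'INFO'}}}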
|
# -*- coding: utf-8 -*-
import os
import flask
from flask import url_for, request
from heroku import HerokuRequest
app = flask.Flask(__name__)
# Application variables
app.secret_key = 'Your secret key.'
debug_mode = os.environ.get('DEBUG', False) == '1'
http_prefix = 'https://'
server_name = os.environ['SERVER_NAME']
app.debug = debug_mode
if app.debug:
pass
else:
app.config.update(SERVER_NAME=server_name)
app.request_class = HerokuRequest
app.config.update(HTTP_PREFIX=http_prefix)
def build_api_link(service_name, callback_url):
"""
Utility for building UDID.io API links
"""
api_link = 'https://get.udid.io/thirdparty/api/?callback=%(callback)s&service=%(service)s&schemeurl=0' % {
'callback': callback_url,
'service': service_name
}
return api_link
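# Worked example (hypothetical callback URL): the generated link simply interpolates the service
# name and callback into the get.udid.io query string, e.g.
#   build_api_link('My app', 'https://example.com/postback')
#   -> 'https://get.udid.io/thirdparty/api/?callback=https://example.com/postback&service=My app&schemeurl=0'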
@app.route('/')
def index():
"""
Homepage endpoint
The homepage contains the link "Check in the device", which will pass
user through UDID obtaining process
"""
callback_url = ''.join((http_prefix, server_name, url_for('.postback')))
api_link = build_api_link('UDID registration test app', callback_url)
return '<a href="%s">Check in the device.</a>' % api_link
@app.route('/postback', methods=['POST'])
def postback():
"""
POST callback endpoint
The UDID.io service will send a POST request to this endpoint.
Values send with the POST request will contain UDID, IMEI, Product, Version and Serial No.
"""
udid = request.form.get('udid')
# fields
imei = request.form.get('imei')
product = request.form.get('product')
version = request.form.get('version')
serial = request.form.get('serial')
return 'Device UDID is: %s' % udid
if __name__ == "__main__":
if app.debug:
app.run(host='0.0.0.0', port=8000, debug=True)
else:
print """To run application you should use application
server or run it with DEBUG=1 environmental variable set"""
|
from __future__ import absolute_import
import unittest
from pysasl.creds import StoredSecret, AuthenticationCredentials
from pysasl.hashing import BuiltinHash
builtin_hash = BuiltinHash(rounds=1000)
password_sha256 = '6f3b2db13d217e79d70d43d326a6e485756bcbe1b4e959f3e86c0d9eb' \
'62fa40a352c178b1fc30896e7c484d74a78561d'
password_sha512 = '1339152519f33e66bf15837624ce57563f680e5d2a2700a5016cb087c' \
'5c05b3e22ba040a32f9453dbcb13071966bdb88cf5e8b0be68c3026094ff67bf03475c2' \
'2a15e9e39d5fcbe07a0c62296f155999'
class TestHashing(unittest.TestCase):
def test_builtin_good(self) -> None:
creds = AuthenticationCredentials('username', 'password')
stored = StoredSecret(password_sha256, hash=builtin_hash)
self.assertTrue(creds.check_secret(stored))
def test_builtin_invalid(self) -> None:
creds = AuthenticationCredentials('username', 'invalid')
stored = StoredSecret(password_sha256, hash=builtin_hash)
self.assertFalse(creds.check_secret(stored))
def test_builtin_copy(self) -> None:
creds = AuthenticationCredentials('username', 'password')
builtin_copy = builtin_hash.copy()
stored = StoredSecret(password_sha256, hash=builtin_copy)
self.assertTrue(creds.check_secret(stored))
builtin_copy = builtin_hash.copy(hash_name='sha512')
stored = StoredSecret(password_sha512, hash=builtin_copy)
self.assertTrue(creds.check_secret(stored))
def test_cleartext_good(self) -> None:
creds = AuthenticationCredentials('username', 'password')
self.assertTrue(creds.check_secret(StoredSecret('password')))
def test_cleartext_invalid(self) -> None:
creds = AuthenticationCredentials('username', 'invalid')
self.assertFalse(creds.check_secret(StoredSecret('password')))
def test_cleartext_copy(self) -> None:
creds = AuthenticationCredentials('username', 'password')
stored = StoredSecret('password')
self.assertTrue(creds.check_secret(stored))
stored = StoredSecret('password', hash=stored.hash.copy())
self.assertTrue(creds.check_secret(stored))
def test_none(self) -> None:
creds = AuthenticationCredentials('username', 'password')
self.assertFalse(creds.check_secret(None))
|
from Service import Service
from Queue import Queue
class Node(object):
# Constructor.
def __init__(self, service_name, to_indices=[]):
if to_indices is None:
to_indices = []
self.service_name = service_name
self.to_indices = to_indices
def to_string(self):
rtn = [self.service_name, str(len(self.to_indices))]
for to_index in self.to_indices:
rtn.append(str(to_index))
return str(rtn)
class Graph(object):
# Constructor.
def __init__(self, node_list):
self.node_list = node_list
# Start index is always initialized to 0
# TODO: get rid of start_index and pass it into ThriftClient.infer()
# since it is only used when traversing through the graph
self.start_index = 0
# Validate.
global_has_seen = set()
start_node = self.get_node(self.start_index)
fringe = Queue()
has_seen = set()
fringe.put(start_node)
has_seen.add(start_node)
while not fringe.empty():
curr_node = fringe.get()
for to_index in curr_node.to_indices:
to_node = self.get_node(to_index)
if to_node in has_seen:
if 'DCM' not in curr_node.service_name:
print 'Invalid graph: cyclic without decision node'
exit()
else:
fringe.put(to_node)
has_seen.add(to_node)
global_has_seen = has_seen.union(global_has_seen)
if len(global_has_seen) < len(node_list):
print 'Invalid graph: unconnected'
# Create a set of service names for fast look-up.
self.service_names = set()
for node in node_list:
self.service_names.add(node.service_name)
def get_node(self, index):
if index < 0 or index >= len(self.node_list):
print 'Invalid index'
exit()
else:
return self.node_list[index]
def get_next_index(self, curr_node, next_service_name):
for index in curr_node.to_indices:
if self.get_node(index).service_name == next_service_name:
return index
print 'Invalid next service ' + next_service_name
exit()
def to_string(self):
rtn = ''
for node in self.node_list:
rtn += node.to_string()
rtn += ', '
rtn += 'and start index: '
rtn += str(self.start_index)
return rtn
def has_service(self, service_name):
return service_name in self.service_names
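# Hedged usage sketch (hypothetical service names; this module targets Python 2, matching the
# Queue import and print statements above):
#   nodes = [Node('IMM', [1]), Node('QA', [])]     # IMM feeds into QA
#   graph = Graph(nodes)
#   graph.has_service('QA')                        # True
#   graph.get_next_index(nodes[0], 'QA')           # 1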
|
import tensorflow as tf
from tensorflow.contrib import layers
from config import MovieQAPath
from raw_input import Input
_mp = MovieQAPath()
hp = {'emb_dim': 256, 'feat_dim': 512, 'dropout_rate': 0.1}
def dropout(x, training):
return tf.layers.dropout(x, hp['dropout_rate'], training=training)
def l2_norm(x, axis=None):
if axis is None:
axis = 1
return tf.nn.l2_normalize(x, axis=axis)
def unit_norm(x, dim=2):
return layers.unit_norm(x, dim=dim, epsilon=1e-12)
def dense(x, units=hp['emb_dim'], use_bias=True, activation=tf.nn.relu, reuse=False):
return tf.layers.dense(x, units, activation=activation, use_bias=use_bias, reuse=reuse)
class Model(object):
def __init__(self, data, beta=0.0, training=False):
self.data = data
reg = layers.l2_regularizer(beta)
initializer = tf.orthogonal_initializer()
def dense_kernel(inp, width, in_c, out_c, factor=4):
k1 = tf.layers.dense(inp, in_c * width * factor, # tf.nn.tanh,
kernel_initializer=initializer, kernel_regularizer=reg)
# k1 = dropout(k1, training)
k1 = tf.reshape(k1, [width, in_c, factor])
k2 = tf.layers.dense(inp, out_c * width * factor, # tf.nn.tanh,
kernel_initializer=initializer, kernel_regularizer=reg)
# k2 = dropout(k2, training)
k2 = tf.reshape(k2, [width, factor, out_c])
k = tf.matmul(k1, k2)
k = l2_norm(k, [0, 1])
return k
def dense_bias(inp, out_c):
k = tf.layers.dense(inp, out_c, # tf.nn.relu,
kernel_initializer=initializer, kernel_regularizer=reg)
# k = dropout(k, training)
k = tf.reshape(k, [out_c])
return k
with tf.variable_scope('Embedding_Linear'):
self.ques = self.data.ques
self.ans = self.data.ans
self.subt = self.data.subt
self.ques = l2_norm(self.ques)
self.ans = l2_norm(self.ans)
self.subt = l2_norm(self.subt)
with tf.variable_scope('Question'):
self.ques = dropout(tf.layers.dense(
self.ques, hp['emb_dim'], # tf.nn.relu,
kernel_initializer=initializer, kernel_regularizer=reg), training)
self.ques = l2_norm(self.ques)
# (3, E_t)
q_k_1 = dense_kernel(self.ques, 3, hp['emb_dim'], hp['emb_dim'], 1)
q_b_1 = dense_bias(self.ques, hp['emb_dim'])
# (3, E_t)
q_k_2 = dense_kernel(self.ques, 3, hp['emb_dim'], hp['emb_dim'] // 2, 1)
q_b_2 = dense_bias(self.ques, hp['emb_dim'] // 2)
# (3, E_t)
q_k_3 = dense_kernel(self.ques, 3, hp['emb_dim'] // 2, hp['emb_dim'] // 4, 1)
q_b_3 = dense_bias(self.ques, hp['emb_dim'] // 4)
# (3, E_t)
q_k_4 = dense_kernel(self.ques, 3, hp['emb_dim'] // 4, hp['emb_dim'] // 8, 1)
q_b_4 = dense_bias(self.ques, hp['emb_dim'] // 8)
with tf.variable_scope('Answers_Subtitles'):
# (5, E_t)
self.ans = dropout(tf.layers.dense(
self.ans, hp['emb_dim'], # tf.nn.relu,
kernel_initializer=initializer, kernel_regularizer=reg), training)
self.ans = l2_norm(self.ans)
self.subt = tf.expand_dims(self.subt, 0)
# self.subt = tf.layers.average_pooling1d(self.subt, 3, 3)
# (N, E_t)
self.subt = dropout(tf.layers.dense(
self.subt, hp['emb_dim'], # tf.nn.relu,
kernel_initializer=initializer, reuse=True), training)
self.subt = l2_norm(self.subt)
# (1, N, E_t)
s_exp = self.subt
# (1, 1, E_t)
q_exp = tf.expand_dims(self.ques, 0)
# (1, 5, E_t)
a_exp = tf.expand_dims(self.ans, 0)
s_shape = tf.shape(self.subt)
with tf.variable_scope('Abstract'):
# (1, N, E_t)
self.conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv1d(s_exp, q_k_1, 1, padding='SAME'), q_b_1))
self.conv1 = l2_norm(self.conv1, 2)
# (1, N, E_t)
self.conv2 = tf.nn.relu(
tf.nn.bias_add(tf.nn.convolution(self.conv1, q_k_2, dilation_rate=[2], padding='SAME'), q_b_2))
self.conv2 = l2_norm(self.conv2, 2)
# (1, N, E_t)
self.conv3 = tf.nn.relu(
tf.nn.bias_add(tf.nn.convolution(self.conv2, q_k_3, dilation_rate=[3], padding='SAME'), q_b_3))
self.conv3 = l2_norm(self.conv3, 2)
# (1, N, E_t)
self.conv4 = tf.nn.relu(
tf.nn.bias_add(tf.nn.convolution(self.conv3, q_k_4, dilation_rate=[4], padding='SAME'), q_b_4))
self.conv4 = l2_norm(self.conv4, 2)
# (N, E_t, E_t / 8)
self.confuse = tf.matmul(tf.transpose(s_exp, [1, 2, 0]), tf.transpose(self.conv4, [1, 0, 2]))
# (E_t / 8, N, E_t)
self.confuse = tf.transpose(self.confuse, [2, 0, 1])
# (E_t / 8, 5, N_t)
self.response = tf.matmul(tf.tile(a_exp, [tf.shape(self.confuse)[0], 1, 1]),
self.confuse, transpose_b=True)
# (E_t / 8, 5, 8)
self.top_k_response, _ = tf.nn.top_k(self.response, 8)
# (5, E_t / 8)
self.top_k_response = tf.transpose(tf.reduce_sum(self.top_k_response, 2))
# (5, 2)
self.top_k_output, _ = tf.nn.top_k(self.top_k_response, 2)
# (1, 5)
self.output = tf.transpose(tf.reduce_sum(self.top_k_output, 1, keepdims=True))
def main():
data = Input(split='train', mode='subt')
model = Model(data)
for v in tf.global_variables():
print(v)
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
# config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
with tf.Session(config=config) as sess:
sess.run([model.data.initializer, tf.global_variables_initializer()],
feed_dict=data.feed_dict)
# q, a, s = sess.run([model.ques_enc, model.ans_enc, model.subt_enc])
# print(q.shape, a.shape, s.shape)
# a, b, c, d = sess.run(model.tri_word_encodes)
# print(a, b, c, d)
# print(a.shape, b.shape, c.shape, d.shape)
        # Model defines no `abstract`/`attn` attributes; fetch the final answer scores instead.
        scores = sess.run(model.output)
        print(scores)
        print(scores.shape)
if __name__ == '__main__':
main()
|
import copy
import os
import sys
import urllib.request
from sure_tosca_client import Configuration, ApiClient, NodeTemplate
from sure_tosca_client.api import default_api
import networkx as nx
import matplotlib.pyplot as plt
class ToscaHelper:
def __init__(self, sure_tosca_base_url, tosca_template_path):
self.sure_tosca_base_url = sure_tosca_base_url
self.tosca_template_path = tosca_template_path
self.tosca_client = self.init_sure_tosca_client(sure_tosca_base_url)
self.doc_id = self.upload_tosca_template(tosca_template_path)
def upload_tosca_template(self, file_path):
file_id = self.tosca_client.upload_tosca_template(file_path)
return file_id
def init_sure_tosca_client(self,sure_tosca_base_path):
configuration = Configuration()
configuration.host = sure_tosca_base_path
api_client = ApiClient(configuration=configuration)
api = default_api.DefaultApi(api_client=api_client)
return api
def get_interface_types(target):
interface_types = []
for interface in target.node_template.interfaces:
interface_types.append(interface)
return interface_types
def get_application_nodes(self):
return self.tosca_client.get_node_templates(self.doc_id, type_name='tosca.nodes.QC.Application')
def get_deployment_node_pipeline(self):
nodes_to_deploy = self.get_application_nodes()
G = nx.DiGraph()
sorted_nodes = []
for node in nodes_to_deploy:
related_nodes = self.tosca_client.get_related_nodes(self.doc_id,node.name)
for related_node in related_nodes:
G.add_edge(node.name, related_node.name)
# # We need to deploy the docker orchestrator on the VMs not the topology.
# # But the topology is directly connected to the orchestrator not the VMs.
# # So we explicitly get the VMs
# # I don't like this solution but I can't think of something better.
# if related_node.node_template.type == 'tosca.nodes.QC.VM.topology':
# vms = self.get_vms()
# related_node = vms
# pair = (related_node, node)
# nodes_pairs.append(pair)
sorted_graph = sorted(G.in_degree, key=lambda x: x[1], reverse=True)
for node_tuple in sorted_graph:
node_name = node_tuple[0]
for node in nodes_to_deploy:
if node.name == node_name:
sorted_nodes.append(node)
return sorted_nodes
@classmethod
def service_is_up(cls, url):
code = None
try:
code = urllib.request.urlopen(url).getcode()
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
if not e.reason and not e.reason.errno and e.code:
return False
else:
return True
return True
def get_vms(self):
return self.tosca_client.get_node_templates(self.doc_id, type_name='tosca.nodes.QC.VM.Compute')
def set_node(self, updated_node, tosca_template_dict):
node_templates = tosca_template_dict['topology_template']['node_templates']
for node_name in node_templates:
if node_name == updated_node.name:
node_templates[node_name] = updated_node.node_template.to_dict()
return tosca_template_dict
def get_interface_types(node):
interface_type_names = []
if node.node_template.interfaces:
for interface in node.node_template.interfaces:
interface_type_names.append(interface)
return interface_type_names
|
# -*- coding: utf-8 -*-
!pip install torch
!pip install pytorch_transformers
!pip install transformers
!pip install IProgress
from __future__ import absolute_import, division, print_function
import json
import os
import torch
import torch.nn.functional as F
from nltk import word_tokenize
from pytorch_transformers import (BertConfig, BertForTokenClassification,
BertTokenizer)
class BertNer(BertForTokenClassification):
def forward(self, input_ids, token_type_ids=None, attention_mask=None, valid_ids=None):
sequence_output = self.bert(input_ids, token_type_ids, attention_mask, head_mask=None)[0]
batch_size,max_len,feat_dim = sequence_output.shape
valid_output = torch.zeros(batch_size,max_len,feat_dim,dtype=torch.float32,device='cuda' if torch.cuda.is_available() else 'cpu')
for i in range(batch_size):
jj = -1
for j in range(max_len):
if valid_ids[i][j].item() == 1:
jj += 1
valid_output[i][jj] = sequence_output[i][j]
sequence_output = self.dropout(valid_output)
logits = self.classifier(sequence_output)
return logits
class Ner:
def __init__(self,model_dir: str):
self.model , self.tokenizer, self.model_config = self.load_model(model_dir)
self.label_map = self.model_config["label_map"]
self.max_seq_length = self.model_config["max_seq_length"]
self.label_map = {int(k):v for k,v in self.label_map.items()}
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.model = self.model.to(self.device)
self.model.eval()
def load_model(self, model_dir: str, model_config: str = "model_config.json"):
model_config = os.path.join(model_dir,model_config)
model_config = json.load(open(model_config))
model = BertNer.from_pretrained(model_dir)
tokenizer = BertTokenizer.from_pretrained(model_dir, do_lower_case=model_config["do_lower"])
return model, tokenizer, model_config
!pip install ipywidgets
!pip install tqdm
from transformers import AutoTokenizer, AutoModelForTokenClassification
from transformers import pipeline
from ipywidgets import IntProgress
tokenizer = AutoTokenizer.from_pretrained("dslim/bert-base-NER")
model = AutoModelForTokenClassification.from_pretrained("dslim/bert-base-NER")
model = BertNer.from_pretrained('/Users/Pablo/Downloads/pytorch')
|
from typing import List
class Solution:
"""
头尾双指针
"""
def exchange(self, nums: List[int]) -> List[int]:
head, tail = 0, len(nums) - 1
while head < tail:
if nums[head] % 2 != 0:
head += 1
continue
if nums[tail] % 2 != 1:
tail -= 1
continue
nums[head], nums[tail] = nums[tail], nums[head]
head += 1
tail -= 1
return nums
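# Example trace (from the test cases below): exchange([1, 2, 3, 4]) keeps the odd 1 at the head,
# then swaps the even 2 with the odd 3 to give [1, 3, 2, 4], so all odd numbers end up before
# the even ones.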
if __name__ == '__main__':
test_cases = [[], [24, 6, 9], [3, 4, 99, 80], [1, 2, 3, 4]]
for case in test_cases:
ans = Solution().exchange(case)
print(ans)
|
#This file was used for stripping the brackets [] from the data set
#No need to run this file again. The new data set is saved in new_data.csv
import pandas as pd
data = pd.read_csv("data.csv", index_col="index")
counter = 0
for index in data.index:
lyric = data.loc[index, 'Lyric']
allInd = []
begIndex = 0
endIndex = 0
for i, letter in enumerate(lyric):
if letter == '[':
begIndex = i
for j, let in enumerate(lyric[i:]):
if let == ']':
endIndex = j+i+1
allInd = allInd + [(begIndex, endIndex)]
begIndex = 0
endIndex = 0
break
    #slice the spans out in reverse order so earlier removals do not shift the later indices
    for i in reversed(allInd):
        lyric = lyric[:i[0]] + lyric[i[1]:]
data.loc[index,'Lyric'] = lyric
print(index)
data.to_csv("new_data.csv")
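# Worked example (illustrative lyric, not from the dataset): a value such as
# "[Chorus] hello world [x2] again" has each [...] span located and sliced out,
# leaving " hello world  again" in the Lyric column written to new_data.csv.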
|
import logging
import json
import requests as r
from requests.packages.urllib3.util.retry import Retry
import re
from django.conf import settings
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.propagate = False
def handle_error_msg(errors):
"""
convenience function for handling errors in response from requests
"""
if "message" in errors.keys():
logger.error("Error: %s", errors["message"])
else:
logger.error("Error: but no message")
def get_requests_session(url):
"""
"""
retry_strategy = Retry(total=2, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
adapter = r.adapters.HTTPAdapter(max_retries=retry_strategy)
session = r.Session()
session.mount(url, adapter)
return session
def post_to_slack(msg, timeout=3.0):
"""
Post a message to slack channel via webhook.
The method accepts a msg string
timeout is an option parameters which defaults to three seconds to prevent
posting to slack from slowing down the ingest too much.
"""
if settings.SLACK_WEBHOOK:
session = get_requests_session(settings.SLACK_WEBHOOK)
header = {"Content-type": "application/json"}
payload = {"text": msg}
response = session.post(settings.SLACK_WEBHOOK, headers=header, json=payload, timeout=timeout)
status = response.status_code
if status != 200:
logger.error("status %d", status)
else:
logger.debug("Slack posting a success: %s", status)
else:
logger.info("Slack webhook not configured")
def post_to_slack_if_meertime(msg, proposal):
"""
Post to slack but only if observation is a meertime observation
as determined by the proposal code
return True if proposal matched the used pattern, False otherwise
"""
    meertime_pattern = re.compile(r"SCI\S*MB\S*")
if meertime_pattern.match(proposal):
post_to_slack(msg)
return True
else:
logging.debug("Not posting to slack as observation is not meertime")
return False
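# Worked example (illustrative proposal codes): the SCI\S*MB\S* pattern is anchored at the start
# by re.match, so a proposal such as "SCI-20180516-MB-02" posts to Slack, while
# "COM-20180516-MB-01" or "SCI 2018 MB" (whitespace breaks \S*) are skipped.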
|
import pandas as pd
import numpy as np
import igraph as g
import json
from time import time
import geopandas as gpd
STREAM_FILEPATH = 'D/DNR HYDRO/corrected streams.geojson'
LAKES_CLEAN_FILEPATH = 'D/DNR HYDRO/lakes clean.geojson'
with open(STREAM_FILEPATH) as f:
data = json.load(f)
def coord_to_str(xyz):
return str(round(xyz[0])) + ', ' + str(round(xyz[1]))
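# Worked example (illustrative coordinate): rounding to whole units keeps stream endpoints that
# differ only by sub-unit noise on the same graph vertex, e.g.
#   coord_to_str([432205.31, 5123400.68, 0.0])   # -> '432205, 5123401'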
G = g.Graph(directed=True)
# oddly have to initialize name attribute
G.add_vertex('xxx')
counter = 0
tenpct = int(len(data['features'])/10)
t = time()
for f in data['features']:
counter +=1
if counter % tenpct == 0:
print(str(int(counter / tenpct)* 10) + '%')
print(time()-t)
t = time()
fpoint = coord_to_str(f['geometry']['coordinates'][0][0])
lpoint = coord_to_str(f['geometry']['coordinates'][0][-1])
if fpoint not in G.vs['name']:
G.add_vertex(fpoint)
if lpoint not in G.vs['name']:
G.add_vertex(lpoint)
G.add_edge(fpoint, lpoint,
length=f['properties']['Shape_Leng'],
deposit_lake=f['properties'].get('deposit lake'),
source_lake=f['properties'].get('source lake'))
g.save(G, 'D/DNR HYDRO/corrected streams igraph.pickle')
G = g.read('D/DNR HYDRO/corrected streams igraph.pickle')
def upstream_lakes(G,dowlknum):
df = pd.DataFrame(index=range(len(G.vs)))
dep_es = G.es.select(deposit_lake_eq=dowlknum)
if len(dep_es) == 0:
return pd.DataFrame(columns=['lake','distance'])
for i in range(len(dep_es)):
df[str(i)] = G.shortest_paths_dijkstra(source=dep_es[i].source, weights='length')[0]
df['short dist'] = df.apply(min, axis=1)
df = df[df['short dist'] < np.inf]
df = df[df['short dist'] >= 0]
    #now we have all attached vertices and the shortest distance to them
src_lakes = []
dists = []
for v in df.index:
for e in G.es(_target = v):
if e['source_lake'] != '':
src_lakes.append(e['source_lake'])
dists.append(df.loc[v,'short dist'])
break #once we get on source lake there cant be any more
ld = pd.DataFrame({'lake':src_lakes,
'distance':dists})
    #in a rare case we can get two streams that go from one lake to another.
    # that would result in two distances to the same lake
ld = pd.DataFrame(ld.groupby('lake').min()).reset_index()
return ld
lakes = gpd.read_file(LAKES_CLEAN_FILEPATH)
dowlknums_str = lakes['dowlknum'].apply(lambda x: str(x).zfill(8))
sdmat = np.empty((len(lakes),len(lakes)))
sdmat.fill(np.nan)
tenpct = int(len(lakes) / 10)
counter = 0
t= time()
for i in dowlknums_str.index: #the index is 0,1,2,3,4...
counter +=1
if counter % tenpct == 0:
print(str(int(counter / tenpct)* 10) + '%')
print(time()-t)
t = time()
up_lakes = upstream_lakes(G,dowlknums_str[i])
for i2 in up_lakes.index:
#get the location of this lake in the distance matrix
#some lakes will be in the network, but not the cleaned lakes file
# they can be ignored
try:
j = dowlknums_str[dowlknums_str == up_lakes['lake'][i2]].index[0]
except IndexError:
continue
if j != i:
sdmat[i,j] = up_lakes['distance'][i2]
np.save('D/upstream dist matrix',sdmat)
#essentially reflect the matrix over the diagonal to get the downstream distances.
#in rare cases there will be a stream loop and there will be nonmissing distances in both directions.
# in that case choose the shorter distance
for i in range(sdmat.shape[0]):
for j in range(sdmat.shape[1]):
if i >= j:
continue
if (sdmat[i,j] >= 0) & (np.isnan(sdmat[j,i])):
sdmat[j, i] = sdmat[i,j]
elif (sdmat[j, i] >= 0) & (np.isnan(sdmat[i, j])):
sdmat[i, j] = sdmat[j, i]
elif (sdmat[j, i] >= 0) & (sdmat[i, j] >=0 ):
print('whoa',i,j)
if sdmat[i,j] > sdmat[j,i]:
sdmat[i,j] = sdmat[j,i]
np.save('D/updownstream dist matrix',sdmat)
#matrix stats
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import gzip
from neon.util.persist import ensure_dirs_exist, pickle_load, valid_path_append, fetch_file
import os
from tqdm import tqdm
import numpy as np
from PIL import Image
class MNIST(object):
"""
Arguments:
path (str): Local path to copy data files.
"""
def __init__(self, path='.'):
self.path = path
self.url = 'https://s3.amazonaws.com/img-datasets'
self.filename = 'mnist.pkl.gz'
self.size = 15296311
def load_data(self):
"""
Fetch the MNIST dataset and load it into memory.
Arguments:
path (str, optional): Local directory in which to cache the raw
dataset. Defaults to current directory.
Returns:
tuple: Both training and test sets are returned.
"""
workdir, filepath = valid_path_append(self.path, '', self.filename)
if not os.path.exists(filepath):
fetch_file(self.url, self.filename, filepath, self.size)
with gzip.open(filepath, 'rb') as f:
self.train_set, self.valid_set = pickle_load(f)
self.train_set = {'image': {'data': self.train_set[0].reshape(60000, 28, 28),
'axes': ('N', 'H', 'W')},
'label': {'data': self.train_set[1],
'axes': ('N',)}}
self.valid_set = {'image': {'data': self.valid_set[0].reshape(10000, 28, 28),
'axes': ('N', 'H', 'W')},
'label': {'data': self.valid_set[1],
'axes': ('N',)}}
return self.train_set, self.valid_set
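# Hedged usage sketch (the path below is illustrative; the first call downloads the ~15 MB
# mnist.pkl.gz archive): load_data returns dicts keyed by 'image' and 'label', with 60000
# 28x28 training images and 10000 validation images.
#   train_set, valid_set = MNIST(path='/tmp/mnist').load_data()
#   train_set['image']['data'].shape   # (60000, 28, 28)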
def ingest_mnist(root_dir, overwrite=False):
'''
Save MNIST dataset as PNG files
'''
out_dir = os.path.join(root_dir, 'mnist')
set_names = ('train', 'val')
manifest_files = [os.path.join(out_dir, setn + '-index.csv') for setn in set_names]
if (all([os.path.exists(manifest) for manifest in manifest_files]) and not overwrite):
return manifest_files
    dataset = {k: s for k, s in zip(set_names, MNIST(out_dir).load_data())}  # MNIST() only takes a path argument
# Write out label files and setup directory structure
lbl_paths, img_paths = dict(), dict(train=dict(), val=dict())
for lbl in range(10):
lbl_paths[lbl] = ensure_dirs_exist(os.path.join(out_dir, 'labels', str(lbl) + '.txt'))
np.savetxt(lbl_paths[lbl], [lbl], fmt='%d')
for setn in ('train', 'val'):
img_paths[setn][lbl] = ensure_dirs_exist(os.path.join(out_dir, setn, str(lbl) + '/'))
# Now write out image files and manifests
for setn, manifest in zip(set_names, manifest_files):
records = []
for idx, (img, lbl) in enumerate(tqdm(zip(*dataset[setn]))): # noqa pylint: disable=zip-builtin-not-iterating
img_path = os.path.join(img_paths[setn][lbl], str(idx) + '.png')
im = Image.fromarray(img)
im.save(os.path.join(out_dir, img_path), format='PNG')
records.append((os.path.relpath(img_path, out_dir),
os.path.relpath(lbl_paths[lbl], out_dir)))
np.savetxt(manifest, records, fmt='%s,%s')
return manifest_files
|
"""
test_htmlgen.py
"""
import pytest
from htmlgen import HtmlGen
@pytest.mark.sanity
@pytest.mark.sunshine
def test_creation():
x = HtmlGen()
assert x is not None
def test_p_tagify():
x = HtmlGen()
assert x.output("X") == "<p>X</p>"
def test_output():
x = HtmlGen()
assert x.output("whatever") == "<p>whatever</p>"
|
# Generated by Django 2.2.6 on 2020-06-06 17:24
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0016_auto_20200606_1953'),
]
operations = [
migrations.AlterModelOptions(
name='post',
options={'ordering': ('-pub_date',)},
),
migrations.AlterField(
model_name='comment',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Дата создания'),
),
migrations.AlterField(
model_name='follow',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='following', to=settings.AUTH_USER_MODEL, verbose_name='Автор'),
),
migrations.AlterField(
model_name='follow',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follower', to=settings.AUTH_USER_MODEL, verbose_name='Подписчик'),
),
]
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File: config_test.py
   Description: Test environment configuration file
Author: Danny
Create Date: 2020/04/09
-------------------------------------------------
Modify:
2020/04/09:
-------------------------------------------------
"""
"""MySQL数据库配置"""
MYSQL = {
'DB_HOST': '',
'DB_USER': '',
'DB_PASS': '',
'DB_PORT': 3306
}
DATABASE = {
'': ''
} |
#!/usr/bin/env python
# coding: utf-8
'''
Author: Kazuto Nakashima
URL: https://github.com/kazuto1011/grad-cam-pytorch
USAGE: python visualize.py --config_file configs/VGG.yaml --target_layer=features.28
USAGE: python visualize.py --config_file configs/ResNet.yaml --target_layer=layer4
USAGE: python visualize.py --config_file configs/DenseNet.yaml --target_layer=features
USAGE: python visualize.py --config_file configs/EfficientNet.yaml --target_layer=_conv_head
'''
from __future__ import print_function
import os
import copy
import random
import warnings
import fire
import click
import cv2
import matplotlib.cm as cm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
from torchvision import models, transforms
import albumentations as A
from albumentations.pytorch import ToTensorV2
from efficientnet_pytorch import EfficientNet
from checkpoint import load_checkpoint
from flags import Flags
from utils import (
get_network,
BackPropagation,
Deconvnet,
GradCAM,
GuidedBackPropagation,
occlusion_sensitivity,
)
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)
# if a model includes LSTM, such as in image captioning,
# torch.backends.cudnn.enabled = False
# Set random seed
seed = 42
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed) # type: ignore
torch.backends.cudnn.deterministic = True # type: ignore
torch.backends.cudnn.benchmark = True # type: ignore
def load_images(image_paths):
images = []
raw_images = []
print("Images:")
for i, image_path in enumerate(image_paths):
print("\t#{}: {}".format(i, image_path))
image, raw_image = preprocess(image_path)
images.append(image)
raw_images.append(raw_image)
return images, raw_images
def preprocess(image_path):
    raw_image = cv2.imread(image_path, cv2.IMREAD_COLOR)  # imread takes an IMREAD_* flag, not a color-conversion code; image stays BGR
raw_image = cv2.resize(raw_image, (224,) * 2)
image = A.Compose([
A.Resize(224, 224),
ToTensorV2(),
])(image=raw_image)['image']
image = image/255.0
return image, raw_image
def save_gradient(filename, gradient):
gradient = gradient.cpu().numpy().transpose(1, 2, 0)
gradient -= gradient.min()
gradient /= gradient.max()
gradient *= 255.0
cv2.imwrite(filename, np.uint8(gradient))
def save_gradcam(filename, gcam, raw_image, paper_cmap=False):
gcam = gcam.cpu().numpy()
cmap = cm.jet_r(gcam)[..., :3] * 255.0
if paper_cmap:
alpha = gcam[..., None]
gcam = alpha * cmap + (1 - alpha) * raw_image
else:
        gcam = (cmap.astype(np.float64) + raw_image.astype(np.float64)) / 2
cv2.imwrite(filename, np.uint8(gcam))
# torchvision models
model_names = sorted(
name
for name in models.__dict__
if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)
def main(target_layer, config_file, topk=1):
"""
Visualize model responses given multiple images
"""
options = Flags(config_file).get()
# Synset words
# classes = get_classtable()
classes = ['Fake', 'Real']
# Model from torchvision
model = get_network(options)
arch=options.network
image_paths=options.experiment.vis_input
output_dir=options.experiment.vis_output
file_name = image_paths.split('/')[-1].split('.')[0]
checkpoint = load_checkpoint(options.test_checkpoint, cuda=True)
model.load_state_dict(checkpoint['model'])
start_epoch = checkpoint["epoch"]
train_accuracy = checkpoint["train_accuracy"]
train_recall = checkpoint["train_recall"]
train_precision = checkpoint["train_precision"]
train_losses = checkpoint["train_losses"]
valid_accuracy = checkpoint["valid_accuracy"]
valid_recall = checkpoint["valid_recall"]
valid_precision = checkpoint["valid_precision"]
valid_losses = checkpoint["valid_losses"]
learning_rates = checkpoint["lr"]
model.to(options.device)
model.eval()
# summary(model, (3, 224, 224), 32)
print(model)
# Images
images, raw_images = load_images([image_paths])
images = torch.stack(images).to(options.device)
"""
Common usage:
1. Wrap your model with visualization classes defined in grad_cam.py
2. Run forward() with images
3. Run backward() with a list of specific classes
4. Run generate() to export results
"""
# =========================================================================
print("Vanilla Backpropagation:")
bp = BackPropagation(model=model)
probs, ids = bp.forward(images) # sorted
for i in range(topk):
bp.backward(ids=ids[:, [i]])
gradients = bp.generate()
# Save results as image files
for j in range(len(images)):
print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))
save_gradient(
filename=os.path.join(
output_dir,
"{}-{}-{}-vanilla-{}.png".format(j, file_name, arch, classes[ids[j, i]]),
),
gradient=gradients[j],
)
# Remove all the hook function in the "model"
bp.remove_hook()
# =========================================================================
print("Deconvolution:")
deconv = Deconvnet(model=model)
_ = deconv.forward(images)
for i in range(topk):
deconv.backward(ids=ids[:, [i]])
gradients = deconv.generate()
for j in range(len(images)):
print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))
save_gradient(
filename=os.path.join(
output_dir,
"{}-{}-{}-deconvnet-{}.png".format(j, file_name, arch, classes[ids[j, i]]),
),
gradient=gradients[j],
)
deconv.remove_hook()
# =========================================================================
print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")
gcam = GradCAM(model=model)
_ = gcam.forward(images)
gbp = GuidedBackPropagation(model=model)
_ = gbp.forward(images)
for i in range(topk):
# Guided Backpropagation
gbp.backward(ids=ids[:, [i]])
gradients = gbp.generate()
# Grad-CAM
gcam.backward(ids=ids[:, [i]])
regions = gcam.generate(target_layer=target_layer)
for j in range(len(images)):
print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))
# Guided Backpropagation
save_gradient(
filename=os.path.join(
output_dir,
"{}-{}-{}-guided-{}.png".format(j, file_name, arch, classes[ids[j, i]]),
),
gradient=gradients[j],
)
# Grad-CAM
save_gradcam(
filename=os.path.join(
output_dir,
"{}-{}-{}-gradcam-{}-{}.png".format(
j, file_name, arch, target_layer, classes[ids[j, i]]
),
),
gcam=regions[j, 0],
raw_image=raw_images[j],
)
# Guided Grad-CAM
save_gradient(
filename=os.path.join(
output_dir,
"{}-{}-{}-guided_gradcam-{}-{}.png".format(
j, file_name, arch, target_layer, classes[ids[j, i]]
),
),
gradient=torch.mul(regions, gradients)[j],
)
if __name__ == "__main__":
fire.Fire(main) |
from twilio.rest import Client
from credentials import account_sid, auth_token, my_cell, my_twilio
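# Note: credentials is assumed to be a local module defining account_sid, auth_token,
# my_cell and my_twilio as strings; it is not part of the twilio package.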
client = Client(account_sid, auth_token)
my_msg = """Hi, This is Kunal
Sudhanshu Bsdk
Bola na dusra Kaam Kar rha Hoon......"""
message = client.api.account.messages.create(to=my_cell, from_=my_twilio, body=my_msg) |
from django.contrib import messages
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.forms import model_to_dict
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.views import View
from apps.task_log.models import TaskLog
from apps.task_log.tasks import schedule_expense_group_creation
class TaskLogView(View):
"""
Task Log view
"""
template_name = 'task/tasks.html'
def get(self, request, workspace_id):
context = {"tasks_tab": "active"}
if request.GET.get('state') == 'complete':
task_logs = TaskLog.objects.filter(workspace__id=workspace_id, status='COMPLETE')
context["complete"] = "active"
elif request.GET.get('state') == 'failed':
task_logs = TaskLog.objects.filter(workspace__id=workspace_id).exclude(status='COMPLETE')
context["failed"] = "active"
else:
task_logs = TaskLog.objects.filter(workspace__id=workspace_id)
context["all"] = "active"
page = request.GET.get('page', 1)
paginator = Paginator(task_logs, 10)
try:
task_logs = paginator.page(page)
except PageNotAnInteger:
task_logs = paginator.page(1)
except EmptyPage:
task_logs = paginator.page(paginator.num_pages)
context["task_logs"] = task_logs
return render(request, self.template_name, context)
def post(self, request, workspace_id):
"""
Start synchronization
:param request:
:param workspace_id:
:return:
"""
value = request.POST.get('submit')
if value == 'sync':
schedule_expense_group_creation(workspace_id, request.user)
messages.success(request, 'Sync started successfully. Expenses will be exported soon!')
return HttpResponseRedirect(self.request.path_info)
class TaskLogDetailsView(View):
"""
Task log details view
"""
@staticmethod
def get(request, workspace_id, task_log_id):
task_log = TaskLog.objects.get(id=task_log_id)
task_log_fields = model_to_dict(task_log)
task_log_fields["task_id"] = task_log.task_id
task_log_fields["type"] = task_log.type
task_log_fields["status"] = task_log.status
task_log_fields["started_at"] = task_log.created_at.strftime('%b. %d, %Y, %-I:%M %-p')
task_log_fields["stopped_at"] = task_log.updated_at.strftime('%b. %d, %Y, %-I:%M %p')
task_log_fields["invoice"] = '-' if task_log.invoice is None else task_log.invoice.invoice_id
task_log_fields["expense_group"] = '-' if task_log.expense_group is None else task_log.expense_group. \
description.get('report_id')
return JsonResponse(task_log_fields)
class TaskLogTextView(View):
"""
Task log text view
"""
@staticmethod
def get(request, workspace_id):
task_log = None
task_log_info = {}
if request.GET.get('type') == "task_log":
task_log_id = request.GET.get('id')
task_log = TaskLog.objects.get(id=task_log_id)
elif request.GET.get('type') == "expense_group":
expense_group_id = request.GET.get('id')
task_log = TaskLog.objects.filter(expense_group__id=expense_group_id).latest()
task_log_info["workspace_name"] = task_log.workspace.name
task_log_info["task_id"] = task_log.task_id
task_log_info["type"] = task_log.type
task_log_info["expense_group_id"] = '-' if task_log.expense_group is None else \
task_log.expense_group.description.get("report_id")
task_log_info["invoice_id"] = '-' if task_log.invoice is None else task_log.invoice.invoice_id
task_log_info["task_start_time"] = task_log.created_at.strftime('%b. %d, %Y, %-I:%M %-p')
task_log_info["task_stop_time"] = task_log.updated_at.strftime('%b. %d, %Y, %-I:%M %-p')
task_log_info["status"] = task_log.status
task_log_info["Task Result"] = task_log.detail
return JsonResponse(task_log_info)
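# A minimal URL wiring sketch for these views (illustrative only; the actual route
# names and prefixes used by the project may differ):
#
#   from django.urls import path
#
#   urlpatterns = [
#       path('workspaces/<int:workspace_id>/tasks/', TaskLogView.as_view(), name='task_logs'),
#       path('workspaces/<int:workspace_id>/tasks/<int:task_log_id>/', TaskLogDetailsView.as_view(), name='task_log_details'),
#       path('workspaces/<int:workspace_id>/tasks/text/', TaskLogTextView.as_view(), name='task_log_text'),
#   ]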
|
from django import forms
from .models import Order
from django.contrib.auth.models import User
from accounttt.models import Profileee
class OrderForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['order_date'].label = 'Дата получения заказа'
order_date = forms.DateField(widget=forms.TextInput(attrs={'type': 'date'}))
class Meta:
model = Order
fields = (
#'first_name', 'last_name', 'phone', 'email', 'address', 'buying_type', 'order_date', 'comment'
'first_name', 'last_name', 'phone', 'email', 'address', 'buying_type', 'order_date', 'comment'
)
# class CustomSignupForm(SignupForm):
# first_name = forms.CharField(max_length=30, label='First Name')
# last_name = forms.CharField(max_length=30, label='Last Name')
# def signup(self, request, user):
# user.first_name = self.cleaned_data['first_name']
# user.last_name = self.cleaned_data['last_name']
# user.save()
# return user
# class UserEditForm(forms.ModelForm):
# class Meta:
# model = User
# fields = ('first_name', 'last_name', 'email')
#
# class ProfileEditForm(forms.ModelForm):
# class Meta:
# model = Profileee
# fields = ('test_field',) |
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2017-2020 Sphinx Confluence Builder Contributors (AUTHORS)
:license: BSD-2-Clause (LICENSE)
"""
from sphinx.util import logging
import io
class ConfluenceLogger():
"""
confluence logger class
    This class is used to manage an internal logger instance and provide
    methods to easily log messages at specific logging levels.
"""
logger = None
@staticmethod
def initialize():
"""
initialize the confluence logger
Before using the Confluence logger utility class, it needs to be
initialized. This method should be invoked once (ideally before any
attempts made to log).
"""
ConfluenceLogger.logger = logging.getLogger("confluence")
@staticmethod
def error(msg, *args, **kwargs):
"""
log an error message
Log a message at the error level. `msg` is a format string with the
arguments provided by `args`. See also:
https://docs.python.org/3/library/logging.html#logging.Logger.error
"""
if ConfluenceLogger.logger:
ConfluenceLogger.logger.error(msg, *args, **kwargs)
@staticmethod
def info(msg, *args, **kwargs):
"""
log an informative message
Log a message at the information level. `msg` is a format string with
the arguments provided by `args`. See also:
https://docs.python.org/3/library/logging.html#logging.Logger.info
"""
if ConfluenceLogger.logger:
ConfluenceLogger.logger.info(msg, *args, **kwargs)
@staticmethod
def verbose(msg, *args, **kwargs):
"""
log a verbose message
Log a message at the verbose level. `msg` is a format string with the
arguments provided by `args`. See also:
https://docs.python.org/3/library/logging.html#logging.Logger.debug
"""
if ConfluenceLogger.logger:
ConfluenceLogger.logger.verbose(msg, *args, **kwargs)
@staticmethod
def warn(msg, *args, **kwargs):
"""
log a warning message
Log a message at the warning level. `msg` is a format string with the
arguments provided by `args`. See also:
https://docs.python.org/3/library/logging.html#logging.Logger.warning
"""
if ConfluenceLogger.logger:
ConfluenceLogger.logger.warning(msg, *args, **kwargs)
@staticmethod
def trace(container, data):
"""
trace data for a given container name
Traces data with a given container name by dumping the contents directly
        to a log file `trace.log`. The log file, if it exists, is appended to.
This is solely for manually debugging unexpected scenarios.
"""
try:
with io.open('trace.log', 'a', encoding='utf-8') as file:
file.write(u'[%s]\n' % container)
file.write(data)
file.write(u'\n')
except (IOError, OSError) as err:
ConfluenceLogger.warn('unable to trace: %s' % err)
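# Example usage (a sketch; assumes this runs inside a Sphinx build where the
# "confluence" logger is available):
#
#   ConfluenceLogger.initialize()
#   ConfluenceLogger.info('publishing %d documents', 3)
#   ConfluenceLogger.trace('page-content', u'<p>hello</p>')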
|
# For the final assignment of this section, we're going to make something a bit more complicated
# The goal of this program is to build a program to simplify two (simple) polynomials multiplied together
# Specifically, we would like to simplify (ax + by)*(cx + dy), where a,b,c,d are inputs given by the user
# We want to print the result in terms of x^2, xy, and y^2 (all of which can be represented as strings)
# For example, for a = 2, b = 3, c = 2, d = 3
# We would print 4x^2+12xy+9y^2
# A helpful function for this assignment is str, which converts a number to a string
# I've sketched the outline of this program below
# However, feel free to modify this program however you want to make it work
# Given a left, middle, and right term
# Formats the string to output
def result_string(x2, xy, y2):
    return str(x2) + "x^2+" + str(xy) + "xy+" + str(y2) + "y^2"
# Calculates the x^2 term
def calc_x2(a, b, c, d):
    return a * c
# Calculates the xy term
def calc_xy(a, b, c, d):
    return a * d + b * c
# Calculates the y^2 term
def calc_y2(a, b, c, d):
    return b * d
# First, get the input from the user, storing it in 4 different variables
print("Give a value for a:")
a = int(input())
print("Give a value for b:")
b = int(input())
print("Give a value for c:")
c = int(input())
print("Give a value for d:")
d = int(input())
# Second, use those values to calculate x^2, xy, and y^2
x2 = calc_x2(a, b, c, d)
xy = calc_xy(a, b, c, d)
y2 = calc_y2(a, b, c, d)
# Finally, print the result
print(result_string(x2, xy, y2)) |
#! /usr/bin/env python
import math
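# Generates a grid of points over [0, endX] x [0, endY] and writes, for each grid
# point, the original (x, y) pair together with its sine-displaced counterpart
# (x, y + amplitude * sin(frequency * x)) to a .pts text file.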
startX = 0.0
startY = 0
xStep = 7.0
yStep = 9.0
endX = 1000.0
endY = 1000.0
amplitude = 250.0
frequency = 0.01
currentX = startX
currentY = startY
outputY = 0
outputFile = '/Users/pete/Desktop/outputpts.pts'
outFile = open(outputFile, 'w')
newline = str('\n')
print(outFile)
while currentX < endX:
outputY = amplitude * math.sin((currentX*frequency))
while currentY < endY:
outVal = currentX, currentY, currentX, (outputY + currentY)
outStr = str(outVal)
outFile.write(outStr)
outFile.write(newline)
currentY = currentY + yStep
currentX = currentX + xStep
currentY = startY
|
from network import *
from submission import *
from data.mapping import *
import numpy as np
import re
# Construct bayesian network
def construct_sample_network(data):
network = bayes_network(data)
# You need to list the nodes so that parents are introduced before children
# You can inspect data.mapping to see all the features
network.append_node(MEDICALSERV, "MEDICALSERV", [])
network.append_node(SCHOOLHYMN, "SCHOOLHYMN", [MEDICALSERV])
network.append_node(MILSERVICE, "MILSERVICE", [MEDICALSERV])
network.append_node(METROPOLIS, "METROPOLIS", [SCHOOLHYMN])
network.append_node(NATO, "NATO", [MILSERVICE])
network.append_node(SAIMAASEAL, "SAIMAASEAL", [SCHOOLHYMN, MILSERVICE])
return network
def is_answer_close(a, b, EPSILON = 1e-2):
return abs(a - b) <= EPSILON
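# The conditional probabilities checked below are frequency estimates from the data,
# e.g. (a sketch of the estimator assumed here; the implementation lives in submission.py):
#   P(SAIMAASEAL=1 | MILSERVICE=1, SCHOOLHYMN=1)
#       = #rows(SAIMAASEAL=1, MILSERVICE=1, SCHOOLHYMN=1) / #rows(MILSERVICE=1, SCHOOLHYMN=1)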
# See that constructed CPT matches the example
def task_conditional_probability(data):
tests = [({SAIMAASEAL : 1}, {MILSERVICE : 1, SCHOOLHYMN: 1}, ["SAIMAASEAL", "MILSERVICE", "SCHOOLHYMN"], 0.857),
({NATO : 1}, {MILSERVICE : 0}, ["NATO", "-MILSERVICE"], 0.82),
({MEDICALSERV : 1}, {}, ["MEDICALSERV"], 0.128),
({SAIMAASEAL : 1}, {MILSERVICE : 0, SCHOOLHYMN: 1}, ["SAIMAASEAL", "-MILSERVICE", "SCHOOLHYMN"], 0.790)]
for query, conditions, fields, answer in tests:
prob = get_conditional_probability(data, query, conditions)
if is_answer_close(prob, answer):
print("correct probability P({}|{}) = {}".format(fields[0], " & ".join(fields[1:]), round(prob,3)))
else:
print("Conditional probability failed: got {}, true answer {}".format(prob, answer))
# See that constructed CPT matches the example
def task_cpt(data):
tests = [(SAIMAASEAL, [MILSERVICE, SCHOOLHYMN], ["SAIMAASEAL", "MILSERVICE", "SCHOOLHYMN"], {"0 0":0.587, "0 1":0.790, "1 0":0.834, "1 1":0.857}),]
for query, conditions, fields, answer in tests:
table = construct_probability_table(data, query, conditions)
print("Calculating CPT for P({}|{})".format(fields[0], " & ".join(fields[1:])))
print("{} : {}".format(" ".join(fields[1:]), fields[0]))
for key, probability in table.items():
assignments = re.findall(".([0-9]+):([0-1]).", key)
str_assignment = " ".join([val for _, val in assignments])
passed = "Correct" if is_answer_close(answer[str_assignment], probability) else "Not right probability, correct: {}".format(answer[str_assignment])
print("{} : {} <- {}".format(str_assignment, round(probability, 3), passed))
def test_brute_force(data, network):
tests = [([MILSERVICE], ([MEDICALSERV, SAIMAASEAL, METROPOLIS], [0, 0, 0]), ["MILSERVICE", "MEDICALSERV", "SAIMAASEAL", "METROPOLIS"], 0.183)]
for query, (E,e), fields, answer in tests:
prob = brute_force(network, query, E, e)
print("Calculating P({}|{})".format(fields[0], " & ".join(fields[1:])))
if is_answer_close(answer, prob):
print("Correct probability {}".format(round(prob, 3)))
else:
print("Wrong, true answer was {} while yours was {}".format(answer, round(prob, 3)))
def test_sampling(data, network):
tests = [([MILSERVICE], ([MEDICALSERV, SAIMAASEAL, METROPOLIS], [0, 0, 0]), ["MILSERVICE", "MEDICALSERV", "SAIMAASEAL", "METROPOLIS"], 0.183)]
for query, (E,e), fields, answer in tests:
prob = [approximate_distribution(network, query, E, e) for _ in range(3)]
print("Calculating P({}|{})".format(fields[0], " & ".join(fields[1:])))
if any([is_answer_close(answer, p, EPSILON = 3e-2) for p in prob]):
print("Correct probability {}".format(round(np.average(prob), 3)))
else:
print("Wrong, true answer was {} while yours was {}".format(answer, round(np.average(prob), 3)))
def main():
# Load data
filename = "data/hs.txt"
data = np.loadtxt(filename, delimiter=" ")
# Construct same bayesian network as in the lecture
network = construct_sample_network(data)
print("\n===\nTesting conditional probability\n===")
task_conditional_probability(data)
print("\n===\n Making CPT\n===")
task_cpt(data)
print("\n===\nTesting brute force inference\n===")
test_brute_force(data, network)
print("\n===\nTesting sampling\n===")
test_sampling(data, network)
if __name__ == "__main__":
main() |
import math
try:
import Tkinter as tk # for Python2
except:
import tkinter as tk # for Python3
import config as cfg
import helpers as hp
from Tile import Tile
import tkMessageBox as mb
import sys
sys.path.insert(0, './tantrix/PodSixNet')
#from PodSixNet.Connection import ConnectionListener #, connection
from time import sleep
#colors for highlight_forced_and_matching
colors = ["cyan2", "dark violet", "thistle1", "yellow2", "magenta3", "DarkOrchid2", "green3", "firebrick",
"MediumPurple1", "purple1"]
#colors = list(cfg.PLAYERCOLORS)
#colors.append(["yellow2", "cyan2", "magenta3", "DarkOrchid2", "green3", "firebrick", "dark violet",
# "thistle1", "MediumPurple1", "purple1"])
forcedmove = False
freemvplayed = False
directions = [[0, 1, -1], [+1, 0, -1], [+1, -1, 0], [0, -1, 1], [-1, 0, 1], [-1, 1, 0]]
#todo I put fixed tile extraction for testing
#ran = 0
final = False
class Deck(hp.DeckHelper, object): #, ConnectionListener):
def __init__(self):
self.tiles = [] #this contains tile in PhotoImage format
self.itemids = [] #itemid = cfg.canvas.create_image()
        self.undealt = range(1, len(cfg.colors) + 1)  # 1:56
self.dealt = [] #1:56
self._positions = [] #(row, col, table)
self._table = [] #(table)
self._positions_moved = [] #(row, col, table, player_num)
self._confirmed = []
self._confirmed.append([]) #after do it for number of players
self._confirmed.append([])
self._confirmed.append([])
self._rotations = []
def is_occupied(self, rowcoltab, storage = None):
"""Return whether an hexagon is already occupied in storage,
which is by default ._positions"""
if storage is None:
storage = self._positions
return rowcoltab in storage
def is_movable(self, rowcoltab1, rowcoltab2):
table1 = rowcoltab1[2]
table2 = rowcoltab2[2]
"""Check if it goes out of the table"""
x, y = cfg.board.off_to_pixel(rowcoltab2)
if x > cfg.BOARD_WIDTH:
return False
if table2 == 0:
if y <= cfg.YTOPBOARD:
return False
elif y >= cfg.YBOTTOMBOARD:
return False
"""Ignore movement when destination is already occupied"""
if self.is_occupied(rowcoltab2):
return False
"""Return False if trying to move from bottom to top or vice versa"""
tile = self.get_tile_from_rowcoltab(rowcoltab1)
if table2 != 0 and tile.confirm != table2:
print('Cannot move from top to bottom or vice versa')
return False
return True
def is_confirmable(self, show_msg = False, rct_rot_num_obl = False):
"""Check if the board can be confirmed or not, returning "" or the error message,
respectively. The show_msg flag shows the message on the UI.
rct_rot_num_obl adds a virtual tile to the board"""
curr_tiles_on_table = self.get_rowcoltabs_in_table(0)
num_curr_tiles_on_hand1 = len(self.get_rowcoltabs_in_table(-1))
num_curr_tiles_on_hand2 = len(self.get_rowcoltabs_in_table(-2))
confirmed_tiles_on_table = self._confirmed[0]
num_confirmed_tiles_on_table = len(confirmed_tiles_on_table)
"""Correct all these values to add a virtual tile"""
if rct_rot_num_obl:
curr_tiles_on_table.append(rct_rot_num_obl[-1])
"""Correct for case rct_rot_num_obl was moved to the table otherwise tests will be skipped"""
if tuple(rct_rot_num_obl[0:3]) in curr_tiles_on_table:
curr_tiles_on_table.remove(tuple(rct_rot_num_obl[0:3]))
else:
if rct_rot_num_obl[2] == -1:
num_curr_tiles_on_hand1 -= 1
elif rct_rot_num_obl[2] == -2:
num_curr_tiles_on_hand2 -= 1
num_curr_tiles_on_table = len(curr_tiles_on_table)
msg = ""
"""If two players are rin a game, their turn is given by cfg.turnUpDown and cfg.player_num"""
_turn = (2 - cfg.turnUpDown % 2 )
        if not cfg.solitaire and cfg.player_num != _turn:
if len(self.undealt) > 0:
msg = "It is %s's turn" % (cfg.opponentname)
elif cfg.turnUpDown % 2 == 1 and num_curr_tiles_on_hand2 < 6:
if len(self.undealt) > 0:
msg = "There are tiles of Player 2 out"
elif cfg.turnUpDown % 2 == 0 and num_curr_tiles_on_hand1 < 6:
if len(self.undealt) > 0:
msg = "There are tiles of Player 1 out"
elif num_curr_tiles_on_hand1 > 6 or num_curr_tiles_on_hand2 > 6:
msg = "A Player has more than 6 tiles"
elif num_curr_tiles_on_hand1 == 6 and num_curr_tiles_on_hand2 == 6:
msg = "No tiles were placed on the board"
forced = self.check_forced()
if not forced:
matches = [self.find_matching_tiles(f, [-1 * (2 - (cfg.turnUpDown % 2))]) for f in forced]
if len(matches):
msg = "No new tiles but first fill in forced spaces"
elif num_curr_tiles_on_hand1 + num_curr_tiles_on_hand2 > 11:
msg = "no tiles from hand1 or hand2 are out2"
elif num_curr_tiles_on_hand1 + num_curr_tiles_on_hand2 < 11:
if len(self.undealt) > 0:
msg = "More than 1 tile from hand1 and hand2 are out"
elif num_confirmed_tiles_on_table - num_curr_tiles_on_table == 0:
msg = "No tiles were added to the table"
elif num_confirmed_tiles_on_table - num_curr_tiles_on_table > 1:
raise UserWarning("more than one tile were added to the table. I should not see this msg")
elif num_curr_tiles_on_table - num_confirmed_tiles_on_table < 0:
raise UserWarning("There are less tiles on table that in ._confirmed[0]. I should not see this msg")
elif num_curr_tiles_on_hand1 + num_curr_tiles_on_hand2 == 11:
if num_curr_tiles_on_table == 1:
#only one tile on the table
pass
elif num_curr_tiles_on_table - num_confirmed_tiles_on_table == 1:
"""Find tile to be confirmed"""
if rct_rot_num_obl:
rowcoltab = rct_rot_num_obl[-1]
ind = self.get_index_from_rowcoltab(rct_rot_num_obl[0:3])
angle = rct_rot_num_obl[3]
else:
for m in self._positions_moved:
rowcoltab = self.get_rowcoltab_from_rowcolnum(m)
if rowcoltab[2] == 0:
break
"""Check if new tile is adjacent to other tiles"""
neighboring = self.get_neighboring_tiles(rowcoltab[0], rowcoltab[1])
if not neighboring:
return "One tile is not adjacent to any other tile"
ind = self.get_index_from_rowcoltab(rowcoltab)
angle = 0
tile = self.tiles[ind]
"""Check if colors match"""
match = tile.tile_match_colors(rowcoltab, angle = angle)
if not match:
msg = "Colors do not match"
else:
"""Check matching tiles for forced spaces, and see if moved tile is between them"""
if rct_rot_num_obl:
matches = []
else:
obliged, matches = self.purge_matchings(table = 'current') #matches can be [[]]
matches[:] = [x for x in matches if len(x)]
if len(matches):
if rowcoltab not in obliged:
msg = "Fill all forced spaces"
"""Check impossible hexagon ie if any neighbors must match three identical colors"""
if not msg:
if len(self.undealt) > 0:
check = self.impossible_neighbor(rowcoltab, rct_rot_num_obl = rct_rot_num_obl)
if check:
msg = check
if self.controlled_side(rowcoltab):
msg = "A controlled side prevents this move"
if show_msg:
cfg.board.message(msg)
return msg
def confirm_move(self, send = True, force = False):
"""Confirm position of moved tile, if possible"""
'''change of turn turnUpDown is done in post_confirm'''
if force:
confirmable = True
else:
confirmable = self.is_confirmable() == ""
if confirmable != True:
cfg.board.message(confirmable)
return False
#Place first tile in the middle
"""if cfg.turnUpDown == 1:
rowcoltab = self.get_rowcoltab_from_rowcolnum(self._positions_moved[0])
self.move_automatic(rowcoltab, (math.floor(cfg.ROWS / 2) - 1, math.floor(cfg.COLS / 2), 0))"""
"""Update confirmed storage and _positions"""
#Use descending loop on _positions_moved because I will remove items from it
for i in range(len(self._positions_moved) - 1, -1, -1):
moved_rowcolnum = self._positions_moved[i] #Origin
moved_rowcoltab2 = self.get_rowcoltab_from_rowcolnum(moved_rowcolnum) #Destination
if moved_rowcoltab2[2] == 0:
'''._confirmed[0] must get one tile more'''
self._confirmed[-moved_rowcoltab2[2]].append(moved_rowcolnum)
'''._confirmed[1] or ._confirmed[2] must remove one tile'''
for table in (1, 2):
match = filter(lambda t : t[2] == moved_rowcolnum[2], [conf_rcn for conf_rcn in self._confirmed[table]])
if len(match) == 1:
                        moved_rowcoltab1 = (match[0][0], match[0][1], -table)
                        self._confirmed[table].remove(match[0])
break
elif len(match) > 1:
raise UserWarning("confirm_move: ._confirmed[{}] has more than one tile played!".format(str(table)))
"""Remove confirmed from storage of unconfirmed moved tiles _positions_moved"""
self._positions_moved.remove(moved_rowcolnum)
elif moved_rowcolnum[2] in [n[2] for n in self._confirmed[-moved_rowcoltab2[2]] ]:
'''Here I reconfirmed tiles that were moved from e.g. top to top'''
ind_to_change = [(j, v) for (j, v) in enumerate(self._confirmed[-moved_rowcoltab2[2]]) if v[2] == moved_rowcolnum[2]]
print(len(ind_to_change))
self._confirmed[-moved_rowcoltab2[2]][ind_to_change[0][0]] = moved_rowcolnum
"""Update confirm storage in tile object"""
ind = cfg.deck.get_index_from_tile_number(moved_rowcolnum[2])
self.tiles[ind].confirmed = moved_rowcoltab2[2]
"""Send to server"""
angle = self.tiles[ind].angle
if send:
#Not Useful: moved_rowcolnum = (moved_rowcolnum[0] - cfg.shifts[0] * 2, moved_rowcolnum[1] - cfg.shifts[1], moved_rowcolnum[2])
moved_rowcoltab2 = (moved_rowcoltab2[0] - cfg.shifts[0] * 2, moved_rowcoltab2[1] - cfg.shifts[1], moved_rowcoltab2[2])
cfg.gui_instance.send_to_server("confirm", rowcoltab1 = moved_rowcoltab1, #rowcolnum = moved_rowcolnum,
rowcoltab2 = moved_rowcoltab2, angle = angle, turnUpDown = cfg.turnUpDown)
"""Append to history"""
rowcoltabnumrotDest = list(moved_rowcoltab2)
rowcoltabnumrotDest.append(moved_rowcolnum[2])
rowcoltabnumrotDest.append(cfg.deck.tiles[ind].angle)
action = "received: " if force else "confirmed:"
if (2 - cfg.turnUpDown % 2) == cfg.player_num:
player = cfg.name + " pl" + str(cfg.player_num)
else:
player = cfg.opponentname + " pl" + str(cfg.player_num % 2 + 1)
cfg.history.append(["turn=" + str(cfg.turnUpDown), player,
"Forced: " + str(forcedmove), action, tuple(rowcoltabnumrotDest)])
return True
def highlight_forced_and_matching(self):
""""Finds tiles from both players matching forced spaces and highlights them on the UI"""
global colors
"""define colors the first time""" #TODO mv before, in GUI or so
if cfg.playercolor in colors:
colors.remove(cfg.playercolor)
colors.remove(cfg.opponentcolor)
colors.remove(cfg.PLAYERCOLORS[cfg.PLAYERCOLORS.index(cfg.playercolor) + 4])
colors.remove(cfg.PLAYERCOLORS[cfg.PLAYERCOLORS.index(cfg.opponentcolor) + 4])
j = 0
msg = ""
"""Get the obliged tiles and matches of the player in the current turn"""
obliged, matches = self.purge_matchings(table = 'current')
"""Place highlight and show message if there are forced spaces"""
if len(matches):
"""There are matching tiles of current player fitting in forced spaces. Do nothing"""
for i, o in enumerate(obliged):
if len(matches[i]):
msg = "There are forced spaces"
cfg.board.place_highlight(obliged[i], colors[j % len(colors)])
for m in matches[i]:
cfg.board.place_highlight(m, colors[j % len(colors)])
j += 1
cfg.board.message(msg)
return matches
def purge_matchings(self, table = 'current'):
"""Get the obliged tiles and matches of the player in the current turn. The matches must lead to
        a confirmable board. Return obliged and matches as: [(1,1,0),(1,2,0)] and [[],[(0,1,-1),(0,2,-1)]],
respectively."""
"""Find spaces on board with 3 neighbors that are possible forced spaces"""
obliged = self.check_forced()
"""Get tiles matching the forced spaced and the colors they have to satisfy"""
if table == 'current':
table = [-1 * (2 - (cfg.turnUpDown % 2))]
"""Find the possible matching tiles in forced spaces"""
#matches = [self.find_matching_tiles(o, table) for o in obliged]
matches = []
forced_colors = []
c_orient = []
for obl in obliged:
m, c, o = self.find_matching_tiles(obl, table, return_colors = True)
matches.append(m)
forced_colors.append(c)
c_orient.append(o)
"""There can be only one rotation matching a forced space. Find it for each tile that
        could fit in an obliged hexagon"""
toremove = []
for i, matchings_1hex in enumerate(matches):
if len(matchings_1hex) == 0:
continue
obliged_pos = obliged[i]
hexcolor = forced_colors[i]
color_orient = c_orient[i]
"""Loop on every single tile. NB matches can be [[], [(0,2,-1)], [(0,0,-1),(0,1,-1)]]"""
for m in matchings_1hex:
"""Get the color and orientation of the tile"""
ind = self.get_index_from_rowcoltab(m)
tilecolor = self.tiles[ind].getColor()
tilecolor += tilecolor
rot = (tilecolor.index(hexcolor) - color_orient) * 60
"""Create a virtual tile to check if it is confirmable"""
rct_rot_num_obl = list(m)
rct_rot_num_obl.append(rot)
rct_rot_num_obl.append(self.get_tile_number_from_rowcoltab(m))
rct_rot_num_obl.append(obliged_pos)
"""Check if tile would make the board confirmable"""
confirmable = self.is_confirmable(show_msg = False, rct_rot_num_obl = rct_rot_num_obl)
if confirmable:
"""Procrastinate removing bad matches after exiting the loop"""
toremove.append([m, i])
"""If some matches would not be valid, remove them"""
for rem in toremove:
matches[rem[1]].remove(rem[0])
return obliged, matches
def post_confirm(self):
"""Take care of updating turnUpDown, free, the message etc"""
"""Change current player: make sure that after the play there are no forced spaces"""
matchinglistcurrent = self.highlight_forced_and_matching() #can be [], [[]] or [[(),()]]
#Correct when matchinglistcurrent is [[]]
matchinglistcurrent[:] = [x for x in matchinglistcurrent if len(x)]
"""history - match cur section"""
matchinglistcurrentnum = list(matchinglistcurrent)
        if len(matchinglistcurrent) != 0:
for i, listt in enumerate(matchinglistcurrent):
for j, rct in enumerate(listt):
num = cfg.deck.get_tile_number_from_rowcoltab(rct)
matchinglistcurrentnum[i][j] = list(matchinglistcurrentnum[i][j])
matchinglistcurrentnum[i][j].append(num)
matchinglistcurrentnum[i][j] = tuple(matchinglistcurrentnum[i][j])
cfg.history[-1].append("match cur:")
cfg.history[-1].append(matchinglistcurrentnum)
"""Get globals"""
global forcedmove
global freemvplayed
if not forcedmove:
freemvplayed = True
"""Update turnUpDown when no matching tiles for current player"""
if len(matchinglistcurrent) == 0:
cfg.board.remove_all_highlights()
if forcedmove:
forcedmove = False
cfg.history[-1].append("Forced becomes: " + str(forcedmove))
"""Change turn to next player because no forced tiles to place and the previous was not a forced move"""
cfg.turnUpDown += 1
self.update_stipples()
freemvplayed = False
"""Check if there are forces matches for the opponent"""
matchinglistother = self.highlight_forced_and_matching()
#Correct when matchinglist* are [[]] or [[(1,2,-1)],[]]
matchinglistcurrent[:] = [x for x in matchinglistcurrent if len(x)]
matchinglistother[:] = [x for x in matchinglistother if len(x)]
if len(matchinglistother):
forcedmove = True
cfg.board.message("There are forced spaces")
else:
forcedmove = False
"""history - match opp"""
matchinglistothernum = list(matchinglistother)
            if len(matchinglistother) != 0:
for i, listt in enumerate(matchinglistother):
for j, rct in enumerate(listt):
num = cfg.deck.get_tile_number_from_rowcoltab(rct)
print(matchinglistothernum[i][j])
matchinglistothernum[i][j] = list(matchinglistothernum[i][j])
matchinglistothernum[i][j].append(num)
matchinglistothernum[i][j] = tuple(matchinglistothernum[i][j])
cfg.history[-1].append("match opp:")
cfg.history[-1].append(matchinglistothernum)
else: #There is a forced tile on the current player
forcedmove = True
cfg.history[-1].append("Forced on current player: " + str(forcedmove))
cfg.win.update()
"""history"""
cfg.history[-1].append("turn=: " + str(cfg.turnUpDown))
return True
def remove(self, row, col, table):
rowcoltab = tuple([row, col, table])
ind = self.get_index_from_rowcoltab(rowcoltab)
"""Delete itemid from table and .itemids"""
try:
itemid = self.itemids.pop(ind)
except:
print("remove: Cannot do self.itemids.pop({}) !!".format(ind))
print("rowcoltab={}".format(str(rowcoltab)))
print("len self.itemids=", str(len(self.itemids)))
self.log()
raise UserWarning("remove: Error!")
"""I think this is already done by move_automatic but ok.."""
cfg.canvas.delete(itemid)
"""Update confirmed storage"""
n = self.get_tile_number_from_index(ind)
rowcolnum = tuple([row, col, n])
if not cfg.TRYING:
if table == 0:
self._confirmed[0].remove(rowcolnum)
elif table == -1:
print("removing: _confirmed[1] and row, col, ind")
self._confirmed[1].remove(rowcolnum)
elif table == -2:
self._confirmed[2].remove(rowcolnum)
"""Update _positions_moved"""
if rowcolnum in self._positions_moved:
print("removed rowcolnum {} from _positions_moved".format(rowcolnum))
self._positions_moved.remove(rowcolnum)
"""NB: remove tile from deck dealt. leaving undealt as is"""
num = self.dealt.pop(ind)
"""Return information"""
pos = self._positions.pop(ind)
table = self._table.pop(ind)
tile = self.tiles.pop(ind)
return (pos, num, tile)
def deal(self, row, col, tab, num = 'random'):
row = int(row)
col = int(col)
"""Random tile if player_num is not set"""
        if num == 'random':
if len(self.undealt) == 1:
ran = 0
global final
final = True
self.alert("No more tiles left! Now rules change")
elif len(self.undealt) == 0:
return
else:
ran = cfg.rndgen.randint(0, len(self.undealt) - 1) #0:55
#todo I put fixed tile extraction for testing
#global ran
#ran = (ran + 12) % (len(self.undealt) - 1) #DOTO RM LATER!
num = self.undealt.pop(ran) #1:56
#TODO rm this when deploying
#if num == 14: num += 1
"""Get tile as PhotoImage"""
tileobj = Tile(num, angle = 0)
"""Update storage"""
rowcoltab = tuple([row, col, tab])
temp = rowcoltab
#temp = (cfg.COLS / 2, cfg.ROWS / 2, 0) #this shows a nice automatic dealing
self.tiles.append(tileobj)
self.dealt.append(num)
self._positions.append(temp)
self._rotations.append(0)
self._table.append(tab)
"""Place on canvas"""
itemid = tileobj.create_at_rowcoltab(temp)
self.itemids.append(itemid)
#self.move_automatic(temp, rowcoltab) #this shows a nice automatic dealing
#self._positions_moved.pop()
"""Update confirmed storage"""
ind = self.get_index_from_rowcoltab(rowcoltab)
n = self.get_tile_number_from_index(ind)
rowcolnum = tuple([row, col, n])
if tab == 0:
self._confirmed[0].append(rowcolnum)
elif tab == -1:
self._confirmed[1].append(rowcolnum)
elif tab == -2:
self._confirmed[2].append(rowcolnum)
"""Store confirmed in tile object"""
tileobj.confirm = tab
def move(self, rowcoltab1, rowcoltab2, force = False):
"""Move a tile and update storage. ._positions_moved are updated.
        Return True/False if successful"""
_, _, tab1 = rowcoltab1
_, _, tab2 = rowcoltab2
if not force and not self.is_movable(rowcoltab1, rowcoltab2):
print("move: You cannot move the tile as it is to this hexagon")
self.is_movable(rowcoltab1, rowcoltab2)
return False
itemid, ind = self.get_itemid_from_rowcoltab(rowcoltab1)
tilex, tiley = cfg.board.off_to_pixel(rowcoltab2)
cfg.canvas.coords(itemid, (tilex, tiley))
"""Update moved storage"""
num = self.dealt[ind]
rowcolnum1 = tuple([rowcoltab1[0], rowcoltab1[1], num])
rowcolnum2 = tuple([rowcoltab2[0], rowcoltab2[1], num])
if rowcolnum1 in self._positions_moved:
self._positions_moved.remove(rowcolnum1)
if tab2 == 0:
self._positions_moved.append(rowcolnum2)
elif rowcoltab2 not in self.get_rowcoltabs_in_table(tab2):
if rowcolnum2 not in self.get_confirmed_rowcolnums_in_table(tab2):
self._positions_moved.append(rowcolnum2)
self._positions[ind] = (rowcoltab2)
self._table[ind] = (tab2)
"""Update window"""
cfg.win.update()
return True
def move_automatic(self, rowcoltab1, rowcoltab2, angle = False):
"""move and rotate a tile automatically. NB: .move is used and therefore also ._positions_moved is updated"""
itemid, ind = self.get_itemid_from_rowcoltab(rowcoltab1)
"""Rotate the tile to be moved until it matches rotation"""
if angle is not False:
for rot in range(6):
if self.tiles[ind].angle == angle:
break
elif rot is 6:
raise UserWarning("move_automatic: could not find the right rotation in 6 rotations")
angle_temp, itemid = self.rotate(rowcoltab1, force = False)
sleep(0.25)
if angle_temp is False:
raise UserWarning("move_automatic: could not rotate the tile")
"""Calculate coordinates, direction, distance etc"""
x1, y1 = cfg.board.off_to_pixel(rowcoltab1)
x2, y2 = cfg.board.off_to_pixel(rowcoltab2)
dir = (float(x2 - x1), float(y2 - y1))
distance = math.sqrt(dir[0] * dir[0] + dir[1] * dir[1])
steps = int(math.ceil(distance / 10))
        if steps == 0:
            steps = 1  # avoid a division by zero when source and destination coincide
deltax, deltay = dir[0] / steps, dir[1] / steps
for i in range (1, steps + 1):
xi = x1 + round(deltax * i)
yi = y1 + round(deltay * i)
cfg.canvas.coords(itemid, (xi, yi))
#cfg.canvas.after(25, cfg.win.update())
#TODO when rotated it does not update!
sleep(0.02)
cfg.win.update()
#print(xi,yi,cfg.canvas.coords(itemid))
ok = self.move(rowcoltab1, rowcoltab2)
return ok
def rotate(self, rowcoltab, force = False, clockwise = True):
"""Rotate a tile if tile is not locked: spawn it, replace itemid in self.itemids.
Return the angle (0 to 300) if successful, False if not. Also return the new itemid"""
"""Find the index"""
try:
ind= self.get_index_from_rowcoltab(rowcoltab)
except:
print('not found: ' + str(rowcoltab) +' in')
return False
num = cfg.deck.get_tile_number_from_index(ind)
if not force and num in [rcn[2] for rcn in cfg.deck._confirmed[0]]:
return False
"""Spawn the rotated tile"""
clockwise = 1 if clockwise else -1
tile = Tile(self.dealt[ind], self.tiles[ind].angle - 60 * clockwise)
"""Restore the confirmed position of the tile"""
tile.confirm = self.tiles[ind].confirm
"""Update tiles list"""
self.tiles[ind] = tile
#print("rotate: after spawn before savng in .tiles: ",str(self.tiles[ind].basecolors))
"""Store rotation in storage"""
self._rotations[ind] = tile.angle #(self._rotations[ind] + 60) % 360
"""Place the tile"""
itemid = tile.create_at_rowcoltab(rowcoltab)
self.itemids[ind] = itemid
return self._rotations[ind], itemid
def refill_deck(self, tab):
"""Refill a player's deck"""
"""Check how many tiles there are"""
rowcoltab = self.get_rowcoltabs_in_table(tab)
count = len(rowcoltab)
if count == 6:
return False
"""Flush existing tiles to left"""
for i in range(0, count):
bin, cols, bin = rowcoltab[i]
if cols > i:
"""move tile to left by one or more places (if I move and reset tiles)"""
ok = False
while not ok:
ok = self.move_automatic((0, cols, tab), (0, i, tab), angle = False)
if ok:
num = self.get_tile_number_from_rowcoltab((0, i, tab))
ind_conf = self._confirmed[-tab].index((0, cols, num))
try:
self._confirmed[-tab][ind_conf] = (0, i, num)
except:
self.log()
print("self._confirmed[1].index() is not in list".format(str((0, cols, num))))
#if tab == -1:
# ind_conf = self._confirmed[1].index((0, cols, player_num))
# self._confirmed[1][ind_conf] = (0, i, player_num)
#elif tab == -2:
# ind_conf = self._confirmed[2].index((0, cols, player_num))
# self._confirmed[2][ind_conf] = (0, i, player_num)
else:
print("That might be ok. I will try again flushing")
if i > 6:
raise UserWarning("Cannot flush!")
#This updates _positions_moved
num = self.get_tile_number_from_rowcoltab(tuple([0, i, tab]))
try:
self._positions_moved.remove(tuple([0, i, num]))
except:
print(i, cols, num, self._positions_moved)
print("problem here!")
i += 1
"""Refill deck"""
for i in range(count, 6):
self.deal(0, i, tab)
return True
def reset(self):
"""Reset the table by bringing unconfirmed tiles back to confirmed position.
Tiles are not reset to the original rotation"""
while (self._positions_moved != []):
"""Get info on moved tile"""
rowcolnum1 = self._positions_moved[-1]
rowcoltab1 = self.get_rowcoltab_from_rowcolnum(rowcolnum1)
"""Find where tile in ._positions_moved should go,
ie tile player_num rowcolnum1[2] is present in confirmed storage"""
confirmed = [self._confirmed[1], self._confirmed[0], self._confirmed[2]]
tab_confirmed = [-1, 0, -2]
rowcoltab2 = [] #list of all rowcoltab that were moved
for i, bin in enumerate(confirmed):
for rowcolnum2 in confirmed[i]:
if rowcolnum2[2] == rowcolnum1[2]:
r, c, cv = self.get_rowcoltab_from_rowcolnum(rowcolnum2)
rowcoltab2.append(tuple([r, c, tab_confirmed[i]]))
else:
continue
break
"""Move rowcoltab1 to rowcoltab2"""
if len(rowcoltab2) > 1:
raise UserWarning("Deck.reset: more than one rowcolnum per tiles in confirmed positions. It should not happen")
elif rowcoltab2:
print("reset: moving {} to {}".format(str(rowcoltab1), str(rowcoltab2[0])))
ok = self.move_automatic(rowcoltab1, rowcoltab2[0], angle = False)
print("reset: move_automatic ok=:",ok)
"""If tile cannot be moved because original place is occupied, move it to temporary position"""
if not ok:
temp = (rowcoltab2[0][0], -1, rowcoltab2[0][2])
print("reset move_automatic to temp:",temp)
ok2 = self.move_automatic(rowcoltab1, temp, angle = False)
print("reset: move_automatic ok2=:",ok2)
#while loop takes last tile. continues with second tile.
last = self._positions_moved.pop(-1)
self._positions_moved.insert(0, last)
"""here _position_moved has been purged"""
self.highlight_forced_and_matching()
return True
    def get_surrounding_hexagons(self, table=None):
        """Return a set of rowcolnum, which are all the empty hexagons surrounding tiles on a table.
        The table is _confirmed[0] by default"""
if table is None:
table = self._confirmed[0]
surr = set([])
for t in table:
hex = cfg.board.get_neighboring_hexagons(t[0], t[1])
[surr.add(h) for h in hex]
for t in table:
rowcoltab = self.get_rowcoltab_from_rowcolnum(t)
if rowcoltab in surr:
surr.remove(rowcoltab)
return surr
def check_forced(self):
"""Check for possible forced spaces on the main table. Return the hexagons rowcolnum"""
hex_surrounding_board = self.get_surrounding_hexagons(self._confirmed[0])
obliged_hexagons = []
rowcoltab_in_confirmed0 = [self.get_rowcoltab_from_rowcolnum(c) for c in self._confirmed[0]]
for s in hex_surrounding_board:
"""Get confirmed neighboring tiles"""
rowcoltabs = cfg.board.get_neighboring_hexagons(s[0], s[1])
#"""Find if there is a tile on rowcoltabs"""
#confirmed_neigh_tiles = 0
#for rowcoltab in rowcoltabs:
# if rowcoltab in rowcoltab_in_confirmed0:
# confirmed_neigh_tiles += 1
"""Intersect neighboring hexagons and confirmed tiles"""
confirmed_neigh_tiles = len(set(rowcoltabs) & set(rowcoltab_in_confirmed0))
"""Count if confirmed neighbouring tiles is 3"""
if confirmed_neigh_tiles == 3:
print("Forced space at {},{}".format(s[0], s[1]))
obliged_hexagons.append(s)
"""Get tiles matching"""
elif confirmed_neigh_tiles > 3:
raise UserWarning("Hexagon at {},{} is surrounded by >3 tiles!".format(s[2], s[0], s[1]))
return obliged_hexagons
def find_matching_tiles(self, rowcoltab, table = [-1, -2], return_colors = False):
"""Find all tiles of a table that fit in an empty hexagon. Return a list of rocolnum.
If the flag return_colors is True, also return the colors to satisfy and the
orientation of the first color"""
"""Get the neighbors"""
color_index = self.get_neighboring_colors(rowcoltab)
if not len(color_index):
#print("find_matching_tiles: hexagon has no neighbors".format(str(rowcoltab)))
return
elif len(color_index) > 3 and not cfg.TRYING:
raise UserWarning("Four neighbors!")
"""Get the colors surrounding the tile"""
colors_temp = ''
j = 0
for i in range(0, 6):
if j >= len(color_index):
colors_temp += '-'
elif i == color_index[j][1]:
colors_temp += color_index[j][0]
j += 1
else:
colors_temp += '-'
colors_temp *= 2
colors_split = colors_temp.split('-')
        colors_temp2 = [i for i in colors_split if i != '']
colors = colors_temp2[1]
"""Get all confirmed tiles in the desired tables"""
match = []
for tab in table:
confs = self.get_confirmed_rowcolnums_in_table(tab)
for conf in confs:
ind2 = self.get_index_from_tile_number(conf[2])
tile2 = self.tiles[ind2]
#tile2, ind2 = self.get_tile_from_tile_number(conf[2])
if colors in tile2.basecolors + tile2.basecolors:
match.append(self._positions[ind2])
if return_colors:
return match, colors, colors_temp.index(colors)
return match
def impossible_neighbor(self, rowcolnum, rct_rot_num_obl = False):
"""Check is a place (rowcolnum) has impossible neighbors around it"""
neigh_rowcoltabs = cfg.board.get_neighboring_hexagons(rowcolnum)
rowcoltab_inmain = self.get_rowcoltabs_in_table(0)
if rct_rot_num_obl:
rcn = rct_rot_num_obl[0:2]
rcn.append(0)
rowcoltab_inmain.append(rcn)
for rct in neigh_rowcoltabs:
if rct not in rowcoltab_inmain:
color_dirindex_neighIndex = self.get_neighboring_colors(rct, rct_rot_num_obl = rct_rot_num_obl)
if len(color_dirindex_neighIndex) == 3:
if color_dirindex_neighIndex[0][0] == color_dirindex_neighIndex[1][0] and color_dirindex_neighIndex[0][0] == color_dirindex_neighIndex[2][0]:
return "A neighboring tile would have to match three identical colors"
elif len(color_dirindex_neighIndex) == 4:
return "A neighboring tile would have four neighbors"
return False
def controlled_side(self, rowcoltab):
rowcol_inmain = [(rcn[0], rcn[1]) for rcn in self._confirmed[0]]
if len(rowcol_inmain) < 3:
return False
cube0 = cfg.board.off_to_cube(rowcoltab[0], rowcoltab[1])
neigh = self.get_neighboring_colors(rowcoltab) #(color, ind, n)
neigh_number = len(neigh)
if neigh_number > 2:
return False
"""Directions of the neighbors"""
dir_ind1 = [n[1] for n in neigh]
"""Take each of the one or two neighbors at a certain direction"""
for i1 in range(0, neigh_number):
cube1 = map(lambda x, y : x + y, cube0, directions[dir_ind1[i1]])
rowcol1 = cfg.board.cube_to_off(cube1)
"""Find new direction to go straight"""
if neigh_number == 1:
"""explore both angles"""
dir_ind2n = [(dir_ind1[i1] - 1) % 5, (dir_ind1[i1] + 1) % 5]
else:
"""go opposite to the other neighbor"""
dir_ind2n = [(dir_ind1[i1] + dir_ind1[i1] - dir_ind1[(i1 + 1) % 2] + 6) % 6]
for i2 in range(0, len(list(dir_ind2n))):
cube2n = map(lambda x, y : x + y, cube1, directions[dir_ind2n[i2]])
rowcol2n = cfg.board.cube_to_off(cube2n)
if rowcol2n not in rowcol_inmain:
continue
"""go straight till the end but check at right angle as well"""
empty2n = False
while empty2n is False:
"""Check tile at an angle"""
dir_indn = (dir_ind2n[i2] - dir_ind1[i1] + dir_ind2n[i2] + 6 ) % 6
cuben = map(lambda x, y : x + y, cube2n, directions[dir_indn])
rowcoln = cfg.board.cube_to_off(cuben)
if rowcoln in rowcol_inmain:
return True
"""update tile to the one straight ahead. exit while loop if empty"""
cube2n = map(lambda x, y : x + y, cube2n, directions[dir_ind2n[i2]])
rowcol2n = cfg.board.cube_to_off(cube2n)
if rowcol2n not in rowcol_inmain:
empty2n = True
return False
def score(self, player):
"""Calculate the scores for player 1 or 2"""
if player == 1:
color = cfg.hand1.playercolors[0][0]
elif player == 2:
color = cfg.hand2.playercolors[1][0]
score = []
score_loop = []
scanned_off = []
        conf_rowcols = [c[0:2] for c in self._positions if c[2] == 0]
"""Loop on all confirmed tiles"""
while 1:
"""See if there are unscanned tiles"""
scanned_number = len(map(len, scanned_off))
if scanned_number >= len(conf_rowcols):
break
else:
"""Find the first _confirmed that was not scanned"""
find_first = False
while not find_first:
for c in conf_rowcols: #self._confirmed[0]:
if c[0:2] not in scanned_off:
rowcolnum = c
break
scanned_off.append(rowcolnum[0:2])
#cfg.board.place_highlight((rowcolnum[0], rowcolnum[1], 0))
ind = self.get_index_from_rowcoltab((rowcolnum[0], rowcolnum[1], 0))
tile = self.tiles[ind]
clr = tile.getColor()
if color in clr:
score.append(1)
neighboring_colors = self.get_neighboring_colors(
rowcolnum[0], rowcolnum[1], color)
if len(neighboring_colors) == 0:
thread = False
else:
(neigh_color, ang, _) = neighboring_colors[0]
thread = True
curr_off = rowcolnum[0:2]
else:
thread = False
break
"""Loop on a thread"""
while thread:
"""Get the angle of the color, then follow to the adjacent tile"""
dir = directions[ang]
cube = cfg.board.off_to_cube(curr_off[0], curr_off[1])
next_cube = tuple(map(lambda c, d: c + d, cube, dir))
next_off = cfg.board.cube_to_off(next_cube)
#cfg.board.place_highlight((next_off[0], next_off[1], 0))
"""Check if it closes the loop"""
if next_off in scanned_off:
score_loop.append(score.pop() * 2)
cfg.board.remove_all_highlights()
break
"""Check if present"""
if self.is_occupied((next_off[0], next_off[1]), conf_rowcols):
curr_off = next_off
score[-1] += 1
ang_from = (ang + 3) % 6
tile = self.get_tile_from_rowcoltab((curr_off[0], curr_off[1], 0))
clr = tile.getColor()
angs = (clr.find(color), clr.rfind(color))
ang = angs[0]
if ang == ang_from:
ang = angs[1]
scanned_off.append(curr_off)
else:
break
#cfg.board.remove_all_highlights()
"""Transcribe the scores"""
cfg.scores[player - 1] = 0
if len(score):
cfg.scores[player - 1] = score
score = max(score)
else:
score = 0
cfg.scores_loop[player - 1] = 0
if len(score_loop):
cfg.scores_loop[player - 1] = score_loop
score_loop = max(score_loop)
else: score_loop = 0
#print("cfg.scores[] =" + str(cfg.scores[player - 1]))
#print("cfg.scores_loop[]=" + str(cfg.scores_loop[player - 1]))
return score, score_loop
def is_shiftable(self, horiz = 0, vert = 0):
"""Return if it is possible to do a horizontal or vertical shift of the tiles on the table"""
if len(self._confirmed[0]) < 1:
return False
if horiz == 0 and vert == 0:
#print("Zero shifts are not allowed")
return False
"""Horizontal shift"""
if horiz:
rows = [p[0] for p in self._confirmed[0]]
row_min = min(rows)
xmin, _ = cfg.board.off_to_pixel((row_min, 0, 0))
row_max = max(rows)
xmax, _ = cfg.board.off_to_pixel((row_max, 0, 0))
"""Allow to move 1/-1 i.e. lx/rx"""
if horiz == -1:
if xmin > cfg.HEX_SIZE * 4:
#print("Shift left: xmin is high so ok")
return True
elif xmin <= cfg.HEX_SIZE * 4:
if xmax >= cfg.BOARD_WIDTH: # - cfg.HEX_SIZE * 2:
#print("Shift left: xmin is low, but xmax is high so ok")
return True
else:
#print("Shift left: xmin is low so deny shift")
return False
elif horiz == 1:
if xmax < cfg.BOARD_WIDTH - cfg.HEX_SIZE * 4:
#print("Shift right: xmax is low so ok")
return True
elif xmax >= cfg.BOARD_WIDTH - cfg.HEX_SIZE * 4:
if xmin <= cfg.HEX_SIZE * 3:
#print("Shift right: xmax is high, but xmin is low so ok")
return True
else:
#print("Shift right: xmax is high so deny shift")
return False
"""Vertical shift"""
if vert:
cols = [p[1] for p in self._confirmed[0]]
col_min = min(cols)
_, ymin = cfg.board.off_to_pixel((0, col_min, 0))
col_max = max(cols)
_, ymax = cfg.board.off_to_pixel((0, col_max, 0))
"""Allow to move 1/-1 i.e. up/down"""
if vert == 1: #down
if ymax < cfg.YBOTTOMBOARD - cfg.HEX_HEIGHT * 2:
#print("Shift down: ymax is low so ok")
return True
elif ymax >= cfg.YBOTTOMBOARD - cfg.HEX_HEIGHT * 2:
if ymin <= cfg.YTOPBOARD + cfg.HEX_SIZE:
#print("Shift down: ymax is high, but ymin is low so ok")
return True
else:
#print("Shift down: ymax is high so deny shift")
return False
elif vert == -1:
if ymin > cfg.YTOPBOARD + cfg.HEX_HEIGHT * 2:
#print("Shift up: ymin is high so ok")
return True
elif ymin <= cfg.YTOPBOARD + cfg.HEX_HEIGHT * 2:
if ymax >= cfg.YBOTTOMBOARD - cfg.HEX_HEIGHT:
#print("Shift up: ymin is low, but ymax is high so ok")
return True
else:
#print("Shift up: ymin is low so deny shift")
return False
print("I should not come here in is_shiftable!")
return False
def shift(self, shift_row = 0, shift_col = 0):
"""Shift the whole board based on the current storage"""
if shift_row:
if not self.is_shiftable(horiz = shift_row):
return False
if shift_col:
if not self.is_shiftable(vert = shift_col):
return False
"""Store all the info that has to be used to move the tiles.
I cannot simply move because tiles will be temporarily overlayed"""
rowcoltabs_to_move = []
rowcoltab_destinations = []
rowcolnum_destinations = []
indexes_confirmed = []
indexes_positions = []
itemids = [] #"""Find what has to be moved and store all information"""
for ind, rowcoltab in enumerate(self._positions):
            if rowcoltab[2] == 0:
indexes_positions.append(ind)
rowcoltabs_to_move.append(rowcoltab)
rowcolnum = self.get_rowcolnum_from_rowcoltab(rowcoltab)
rowcoltab_dest = (rowcoltab[0] + shift_row * 2, rowcoltab[1] + shift_col, 0)
rowcoltab_destinations.append(rowcoltab_dest)
itemid, _ = self.get_itemid_from_rowcoltab(rowcoltab)
itemids.append(itemid)
"""Update _confirmed storage and remove confirmed from ._positions_moved"""
if rowcolnum in self._confirmed[0]:
indexes_confirmed.append(self._confirmed[0].index(rowcolnum))
rowcolnum_destinations.append((rowcoltab_dest[0], rowcoltab_dest[1], rowcolnum[2]))
else:
indexes_confirmed.append(-666)
rowcolnum_destinations.append(-666)
for i in range(0, len(rowcoltabs_to_move)):
"""Cannot use .move so move "manually" """
tilex, tiley = cfg.board.off_to_pixel(rowcoltab_destinations[i])
cfg.canvas.coords(itemids[i], (tilex, tiley))
"""Update _positions"""
self._positions[indexes_positions[i]] = rowcoltab_destinations[i]
"""Update confirmed from ._positions_moved"""
if indexes_confirmed[i] != -666:
self._confirmed[0][indexes_confirmed[i]] = rowcolnum_destinations[i]
#"""Remove confirmed from ._positions_moved"""
#cfg.deck._positions_moved.remove(rowcolnum_destinations[i])
""""Manually" shift the _positions_moved storage"""
for i, rowcolnum in enumerate(self._positions_moved):
self._positions_moved[i] = (rowcolnum[0] + shift_row * 2, rowcolnum[1] + shift_col, rowcolnum[2])
"""Control which tiles must stay on top"""
for rct in self._positions:
if rct[2] != 0:
itid, _ = self.get_itemid_from_rowcoltab(rct)
cfg.canvas.tag_raise(itid)
cfg.win.update()
"""Remove highlights"""
cfg.board.remove_all_highlights()
self.highlight_forced_and_matching()
"""Raise stipple rectangles"""
self.update_stipples()
"""Store shifts for sending to other client"""
cfg.shifts[0] += shift_row
cfg.shifts[1] += shift_col
"""Append to history"""
cfg.history.append([cfg.name,"shift=" + str(cfg.shifts)])
return True
def expand(self):
"""Change the position of some tiles when window is expanded"""
for rowcoltab in self._positions:
if rowcoltab[2] == -2:
itemid, ind = self.get_itemid_from_rowcoltab(rowcoltab)
x, y = cfg.board.off_to_pixel(rowcoltab)
cfg.canvas.coords(itemid, (x, y))
def alert(self, msg):
#Show alert only during game mode
import tkMessageBox
if msg is "hasquit":
tkMessageBox.showwarning("Notification", cfg.opponentname + " has quit!")
else:
tkMessageBox.showwarning("Notification", msg)
def log(self, msg = " "):
print(" =======>" + msg)
print(" Player %d - %s" %(cfg.player_num, cfg.name))
print(" forcedmove %s;freemvplayed %s" % (forcedmove, freemvplayed))
print(" cfg.turnUpDown=" + str(cfg.turnUpDown))
print(" cfg.player_num=" + str(cfg.player_num) + ", playerIsTabUp=" + str(cfg.playerIsTabUp))
print(" cfg.name/opponentname=" + str(cfg.name) + "/" + cfg.opponentname)
print(" cfg.deck.is_confirmable= " + str(self.is_confirmable(True) == ""))
print(" cfg.deck._positions=" + str(self._positions[0:4]))
print(" =" + str(self._positions[4:8]))
print(" =" + str(self._positions[8:]))
print(" cfg.deck._table=" + str(self._table))
print(" cfg.deck._positions_moved=" + str(self._positions_moved))
print(" cfg.deck._rotations=" + str(self._rotations))
print(" cfg.deck._confirmed[0]=" + str(self._confirmed[0]))
print(" cfg.deck._confirmed[1]=" + str(self._confirmed[1]))
print(" cfg.deck._confirmed[2]=" + str(self._confirmed[2]))
print(" cfg.deck.itemids=" + str(self.itemids))
#print(" cfg.deck.dealt=" + str(self.dealt))
#print(" cfg.board._highlightids=" + str(cfg.board._highlightids))
#print(" cfg.board._highlight=" + str(cfg.board._highlight))
#print(" cfg.turnUpDown free=" + str((cfg.turnUpDown, cfg.forcedmove)))
print(" <=======")
|
# -*- coding: utf-8 -*-
"""Sentinel Tools Test Config
"""
import sys, pytest, shutil
from os import path, mkdir
sys.path.insert(1, path.abspath(path.join(path.dirname(__file__), path.pardir)))
@pytest.fixture(scope="session")
def stTemp():
testDir = path.dirname(__file__)
tempDir = path.join(testDir, "temp")
if path.isdir(tempDir):
shutil.rmtree(tempDir)
if not path.isdir(tempDir):
mkdir(tempDir)
return tempDir
@pytest.fixture(scope="session")
def stInput():
testDir = path.dirname(__file__)
inDir = path.join(testDir, "input")
return inDir
@pytest.fixture(scope="session")
def stRef():
testDir = path.dirname(__file__)
refDir = path.join(testDir, "reference")
return refDir
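# Example of a test consuming these fixtures (illustrative only; the test name and
# the asserted file are hypothetical):
#
#   def test_writes_output(stTemp, stInput):
#       outFile = path.join(stTemp, "result.txt")
#       with open(outFile, mode="w") as out:
#           out.write("data")
#       assert path.isfile(outFile)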
|
"""
Abscisic Acid Signaling - simulation
- plotting saved data
"""
import sys
from pylab import *
from boolean import util
import numpy
def make_plot( fname ):
obj = util.bload( fname )
data = obj['data']
muts = obj['muts']
genes=['WT','pHc','PA']
# standard deviations
def limit (x):
if x>1:
return 1
elif x<0:
return 0
else:
return x
subplot(122)
color=['b','c','r']
plots=[]
for gene,color in zip(genes,color):
means, std = data[gene]
plots.append(plot( means , linestyle = '-',color = color ))
upper = list(map(limit, means+std))
lower = list(map(limit, means-std))
plot( upper , linestyle = '--',color = color, lw=2 )
plot( lower , linestyle = '--', color = color , lw=2 )
legend( plots, "WT pHc PA".split(), loc='best' )
title( 'Variability of Closure in WT and knockouts' )
xlabel( 'Time Steps' )
ylabel( 'Percent' )
ylim( (0.0, 1.1) )
#
# Plots the effect of mutations on Closure
#
subplot(121)
coll = []
knockouts = 'WT S1P PA pHc ABI1'.split()
for target in knockouts:
p = plot( muts[target]['Closure'], 'o-')
coll.append( p )
legend( coll, knockouts, loc='best' )
title( 'Effect of mutations on Closure' )
xlabel( 'Time Steps' )
ylabel( 'Percent' )
ylim( (0, 1.1) )
if __name__ == '__main__':
figure(num = None, figsize=(14, 7), dpi=80, facecolor='w', edgecolor='k')
fname='ABA-run.bin'
make_plot( fname )
show() |
import sys
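# Register scarletio's asyncio compatibility extension under the hata extension path,
# so that `import hata.ext.asyncio` resolves to the already-imported module.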
import scarletio.ext.asyncio
sys.modules['hata.ext.asyncio'] = sys.modules['scarletio.ext.asyncio']
|
# V0
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/79462285
# IDEA : DFS
class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
(row, col), directions = click, ((-1, 0), (1, 0), (0, 1), (0, -1), (-1, 1), (-1, -1), (1, 1), (1, -1))
if 0 <= row < len(board) and 0 <= col < len(board[0]):
if board[row][col] == 'M':
board[row][col] = 'X'
elif board[row][col] == 'E':
n = sum([board[row + r][col + c] == 'M' for r, c in directions if 0 <= row + r < len(board) and 0 <= col +c < len(board[0])])
board[row][col] = str(n if n else 'B')
if not n:
for r, c in directions:
self.updateBoard(board, [row + r, col + c])
return board
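# Example call for the DFS solution above (illustrative board; 'E' = unrevealed
# empty square, 'M' = unrevealed mine):
#
#   board = [["E", "E", "E"], ["E", "M", "E"], ["E", "E", "E"]]
#   print(Solution().updateBoard(board, [0, 0]))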
# V1'
# http://bookshadow.com/weblog/2017/02/26/leetcode-minesweeper/
# IDEA : BFS
class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
w, h = len(board), len(board[0])
def countBoard(i, j):
cnt = 0
for di in (-1, 0, 1):
for dj in (-1, 0, 1):
ni, nj = i + di, j + dj
if ni < 0 or ni >= w or nj < 0 or nj >= h:
continue
if board[ni][nj] == 'M':
cnt += 1
return str(cnt) if cnt else 'B'
cx, cy = click
if board[cx][cy] == 'M':
board[cx][cy] = 'X'
return board
q = [click]
board[cx][cy] = countBoard(cx, cy)
if board[cx][cy] != 'B':
return board
while q:
ti, tj = q.pop(0)
for di in (-1, 0, 1):
for dj in (-1, 0, 1):
ni, nj = ti + di, tj + dj
if ni < 0 or ni >= w or nj < 0 or nj >= h:
continue
if board[ni][nj] == 'E':
board[ni][nj] = countBoard(ni, nj)
if board[ni][nj] == 'B':
q.append((ni, nj))
return board
# V1''
# IDEA : BFS
# https://leetcode.com/problems/minesweeper/discuss/284461/Python-BFS
class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
R, C = len(board), len(board[0])
if board[click[0]][click[1]] == "M": board[click[0]][click[1]] = "X"
dir = [1,0], [0,1], [-1,0],[0,-1],[1,1],[-1,-1],[1,-1],[-1,1]
q = collections.deque()
q.append(click)
        seen = {tuple(click)}  # track visited cells as (row, col) tuples
def numBombsTangent(board, i, j):
count = 0
for x, y in dir:
if 0 <= i + x < R and 0 <= j + y < C and board[i+x][y+j] == "M": count += 1
return count
while q:
for tup in range(len(q)):
x, y = q.popleft()
if board[x][y] == "E":
bombsNextTo = numBombsTangent(board, x, y)
board[x][y] = "B" if bombsNextTo == 0 else str(bombsNextTo)
if bombsNextTo == 0:
for a, b in dir:
if 0 <= a + x < R and 0 <= b + y < C and (a+x,b+y) not in seen:
q.append((a+x, b+y))
seen.add((a+x, b+y))
return board
# V1'''
# https://leetcode.com/problems/minesweeper/discuss/284461/Python-BFS
# IDEA : DFS
class Solution:
def updateBoard(self, board: List[List[str]], click: List[int]) -> List[List[str]]:
x, y = click
surround = [(-1, 0), (1, 0), (0, 1), (0, -1), (1, -1), (1, 1), (-1, 1), (-1, -1)]
def available(x, y):
return 0 <= x < len(board) and 0 <= y < len(board[0])
def reveal(board, x, y):
# reveal blank cell with dfs
if not available(x, y) or board[x][y] != "E":
return
# count adjacent mines
mine_count = 0
for dx, dy in surround:
if available(dx+x, dy+y) and board[dx+x][dy+y] == "M":
mine_count += 1
if mine_count:
# have mines in adjacent cells
board[x][y] = str(mine_count)
else:
# not adjacent mines
board[x][y] = "B"
for dx, dy in surround:
reveal(board, dx+x, dy+y)
if board[x][y] == "M":
board[x][y] = "X"
elif board[x][y] == "E":
reveal(board, x, y)
return board
# V1''''
# https://leetcode.com/problems/minesweeper/discuss/144746/Python-BFS-%2B-DFS-with-comments
# IDEA : DFS, BFS
class Solution:
directions = [(-1, 0), (-1, -1), (-1, 1), (0,-1), (0, 1), (1, -1), (1, 0), (1, 1)]
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
# return self.bfs(board, click)
return self.dfs(board, click)
def dfs(self, board, click):
stack = [(click[0], click[1])]
m, n = len(board), len(board[0])
while stack:
r, c = stack.pop() # last inserted element
if board[r][c] == 'M':
board[r][c] = 'X'
break
# check for adjacent mines
mines = 0
for i, j in self.directions:
dr = r + i
dc = c + j
if 0 <= dr < m and 0 <= dc < n and board[dr][dc] == 'M':
mines += 1
board[r][c] = str(mines) if mines else 'B'
# add neighbors
for i, j in self.directions:
dr = r + i
dc = c + j
if 0 <= dr < m and 0 <= dc < n and board[r][c] == 'B' and board[dr][dc] == 'E':
stack.append((dr, dc))
return board
def bfs(self, board, click):
queue = [(click[0], click[1])]
m, n = len(board), len(board[0])
while queue:
r, c = queue.pop(0)
if board[r][c] == 'M':
board[r][c] = 'X'
break
# check for adjacent mines
mines = 0
for i, j in self.directions:
dr = r + i
dc = c + j
if 0 <= dr < m and 0 <= dc < n and board[dr][dc] == 'M':
mines += 1
board[r][c] = str(mines) if mines else 'B'
# add neighbors
for i, j in self.directions:
dr = r + i
dc = c + j
# BFS could potentially add duplicate (i,j) to the queue so we check that (i,j) is not already in the queue
if 0 <= dr < m and 0 <= dc < n and (dr,dc) not in queue and board[r][c] == 'B' and board[dr][dc] == 'E':
queue.append((dr, dc))
return board
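# A minimal, hedged usage sketch for the solutions above. The board layout is
# the commonly cited LeetCode sample (reproduced from memory, so treat it as
# illustrative); `Solution` refers to the most recently defined class in this file.
if __name__ == '__main__':
    demo_board = [["E", "E", "E", "E", "E"],
                  ["E", "E", "M", "E", "E"],
                  ["E", "E", "E", "E", "E"],
                  ["E", "E", "E", "E", "E"]]
    for demo_row in Solution().updateBoard(demo_board, [3, 0]):
        print(demo_row)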
# V2 |
import unittest
import vivisect.symboliks.archs.i386 as i386sym
import vivisect.symboliks.analysis as v_s_analysis
from vivisect.symboliks.common import Var, Const, cnot
from vivisect.symboliks.effects import ConstrainPath
import vivisect.tests.helpers as helpers
class IntelSymTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.i386_vw = helpers.getTestWorkspace('linux', 'i386', 'vdir.llvm')
def test_constraints(self):
fva = 0x080509e0
vw = self.i386_vw
ctx = v_s_analysis.getSymbolikAnalysisContext(vw, consolve=True)
g = ctx.getSymbolikGraph(fva)
constraints = {
0x080509e0: {
0x8050a42: ConstrainPath(0x080509ef, Const(0x08050a42, 4), Var("eflags_eq", width=4)),
0x80509f1: ConstrainPath(0x080509ef, Const(0x080509f1, 4), cnot(Var("eflags_eq", width=4))),
},
0x8050a20: {
0x8050a26: ConstrainPath(0x08050a24, Const(0x08050a26, 4), cnot(Var("eflags_eq", width=4))),
0x8050a42: ConstrainPath(0x08050a24, Const(0x08050a42, 4), Var("eflags_eq", width=4)),
},
0x8050a26: {
0x8050a20: ConstrainPath(0x08050a3e, Const(0x08050a20, 4), cnot(Var("eflags_eq", width=4))),
0x8050a40: ConstrainPath(0x08050a3e, Const(0x08050a40, 4), cnot(cnot(Var("eflags_eq", width=4)))),
},
0x80509f1: {
0x8050a44: ConstrainPath(0x08050a0c, Const(0x08050a44, 4), Var("eflags_eq", width=4)),
0x8050a0e: ConstrainPath(0x08050a0c, Const(0x08050a0e, 4), cnot(Var("eflags_eq", width=4)))
},
}
for eid, xfrom, xto, props in g.getEdges():
if xfrom not in constraints:
self.assertEqual(0, len(props))
continue
self.assertTrue('symbolik_constraints' in props)
self.assertEqual(1, len(props['symbolik_constraints']))
pcon = props['symbolik_constraints'][0]
self.assertEqual(pcon, constraints[xfrom][xto])
def test_emulator(self):
fva = 0x08051e10
vw = self.i386_vw
ctx = v_s_analysis.getSymbolikAnalysisContext(vw, consolve=True)
retn = [
'((mem[(arg0 + 24):4](edx,mem[(arg0 + 8):4],mem[0xbfbfefec:4],mem[0xbfbfeff0:4]) * 8) + mem[arg0:4])',
'0x08049a80()',
]
esps = [
Const(0xbfbfeff4, 4),
Const(0xbfbff004, 4),
]
for emu, effects in ctx.walkSymbolikPaths(fva):
self.assertTrue(str(emu.getFunctionReturn().reduce(foo=True)) in retn)
esp = emu.getSymVariable('esp').reduce(foo=True)
self.assertTrue(esp in esps)
self.assertEqual(emu.getSymVariable('ecx'), Var('arg0', 4))
def _cconv_test(self, caller, callee, argc, retn):
# some setup and other fun things
vw = self.i386_vw
# Note: The function itself isn't important, we just need a real Symbolik emulator
# instance that we can pass to setSymbolikArgs so that we can monkey with things
# from there
fva = 0x08051e10
ctx = v_s_analysis.getSymbolikAnalysisContext(vw, consolve=True)
argv = [Var('arg%d' % i, 4) for i in range(argc)]
emu = ctx.getFuncEmu(fva)
callee.setSymbolikArgs(emu, argv)
caller.setSymbolikArgs(emu, argv)
emu.setSymVariable('eax', retn)
self.assertEqual(caller.getSymbolikReturn(emu), retn)
self.assertEqual(caller.getSymbolikReturn(emu), callee.getSymbolikReturn(emu))
        # if the callee cleans it up, the stack pointer is assumed to be adjusted by
        # 20 bytes for bfastcall (with argc == 7, 3 args are in registers, so 4 stack
        # args plus the return address makes 5 dwords to clean up, i.e. 20 bytes)
bytes_cleaned = (1 + callee.getNumStackArgs(emu, argc)) * 4
self.assertEqual(callee.deallocateCallSpace(emu, argc).reduce(), Const(bytes_cleaned, 4))
# if the caller cleans things up, the instructions after are going to do it
# (since we're currently looking at things from a precall perspective), and so
# the only thing that is going to get cleaned up is the return address
self.assertEqual(caller.deallocateCallSpace(emu, argc).reduce(), Const(4, 4))
def test_ccconv_diff(self):
# msfastcall
# thiscall
# bfastcall
retval = Const(0xdeadbeef, 4)
self._cconv_test(i386sym.BFastCall_Caller(), i386sym.BFastCall(), 9, retval)
self._cconv_test(i386sym.ThisCall_Caller(), i386sym.ThisCall(), 27, retval)
self._cconv_test(i386sym.MsFastCall_Caller(), i386sym.MsFastCall(), 1, retval)
|
#!/usr/bin/env python3
import io
from PyPDF2 import PdfFileWriter, PdfFileReader
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
class Applicant(object):
def __init__(self, features):
self.features = features
class Boat(object):
def __init__(self, name, matriculation):
self.name = name
self.matriculation = matriculation
class PermitView(object):
def __init__(
self,
applicant,
boat=None,
site=['Parc national de Port-Cros'],
author='CAPEL',
title='Autorisation de ...',
subject='Plonger au coeur du parc',
template='assets/reglement_2017.pdf',
save_path='/dev/null'):
self.applicant = applicant
self.boat = boat
self.site = site
self.author = author
self.title = title
self.subject = subject
self.template = template
self.save_path = save_path
def save(self):
outputStream = io.BytesIO()
c = canvas.Canvas(outputStream, pagesize=A4)
c.setAuthor(self.author)
c.setTitle(self.title)
c.setSubject(self.subject)
font_size = 12
c.setStrokeColorRGB(0, 0, 0)
c.setFillColorRGB(0, 0, 0)
c.setFont('Helvetica', font_size)
textobject = c.beginText()
for feature in self.applicant.features:
value, x, y = feature
textobject.setTextOrigin(x, y)
textobject.textLine(value)
if self.boat not in (None, 'null'):
value, x, y = self.boat
if isinstance(value, list):
                value = ', '.join(value)
textobject.setTextOrigin(x, y)
textobject.textLine(value)
c.drawText(textobject)
c.save()
        template = PdfFileReader(open(self.template, 'rb'))
n = template.getNumPages()
user_data = PdfFileReader(outputStream)
outputStream.seek(0)
merged = PdfFileWriter()
for i in range(0, n - 1):
print('getNumPages:', i)
page = template.getPage(i)
merged.addPage(page)
page = template.getPage(n - 1)
page.mergePage(user_data.getPage(0))
merged.addPage(page)
with open(self.save_path, 'wb') as pdf_output:
merged.write(pdf_output)
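# A hedged usage sketch (not part of the original module). Feature and boat
# entries are (text, x, y) tuples in page coordinates; the values, the output
# path, and the availability of the default template PDF are all assumptions.
if __name__ == '__main__':
    applicant = Applicant(features=[('Jane Doe', 100, 700),
                                    ('2017-06-01', 100, 680)])
    permit = PermitView(applicant,
                        boat=(['Calypso'], 100, 660),
                        save_path='permit_example.pdf')
    permit.save()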
|
from heapq import heappush, heappop, heapify
import numpy as np
from PIL import Image, ImageDraw, ImageMath
from random import seed, gauss, randrange, random
from sys import maxsize
seed(2)
reference = 'sonic.png'
im = Image.open(reference, 'r')
size = im.size
class Gene:
"""
Each organism has multiple genes. Genes are what are mutated and modified.
This gene contains a radius r, a rotation t and a color c
"""
def __init__(self, *args, **kwargs):
self.params = dict()
for k, v in self.mutables.items():
try:
left, right = self.bounds[k]
except KeyError:
left, right = None, None
if v == float: # initialize to random, [0, 1]
val = random() if not left else randrange(left, right)
self.__setattr__(k, v(val))
elif v == int:
start, end = (0, 100) if not left else (left, right)
self.__setattr__(k, v(randrange(start, end)))
elif v == Position:
xbnd, ybnd = size if not left else (left, right)
val = v(randrange(0, xbnd), randrange(0, ybnd))
self.__setattr__(k, val)
else:
self.__setattr__(k, v())
    def __str__(self):
        # self.params is never populated; report the mutable attributes instead
        return str({k: getattr(self, k) for k in self.mutables})
def __repr__(self):
return str(self)
class Color:
"""
    A color consists of four channel values in [0, 255], one for each of RGBA
"""
def __init__(self, r=0, g=0, b=0, a=1.0):
self.rgba = (r, g, b, a)
if r == g == b == 0 and a == 1.0:
self.randomize()
def randomize(self):
        self.rgba = (randrange(0, 256),
                     randrange(0, 256),
                     randrange(0, 256),
                     randrange(0, 256))
def __str__(self):
return "({}, {}, {}, {})".format(*self.rgba)
def __repr__(self):
return str(self)
class Position:
def __init__(self, xpos=0.0, ypos=0.0):
self.position = (xpos, ypos)
if xpos == 0.0 and ypos == 0.0:
self.randomize()
def randomize(self):
self.position = (random(), random())
def __str__(self):
return "({}, {})".format(*self.position)
def __repr__(self):
return str(self)
class TriangleGene(Gene):
mutables = {'radius': float,
'color': Color,
'position': Position}
bounds = {'position': size, 'radius': (30, 70)}
@property
def verts(self):
x, y = self.position.position
rad = self.radius
return [(x - rad // 2, y), (x, y + rad), (x + rad // 2, y)]
def __init__(self):
super().__init__()
class Organism:
initial_population_size = 200
max_population = 600
number_of_genes = 100
mutation_rate = 0.01
crossover_rate = 0.7
kill_rate = 0.3
gene_split_rate = 0.3
"""
Ideas
Fitness:
- number of overlapping triangles
avg area of triangle - setpoint
Apply k-means clustering over two organisms to
find similar high density fitness areas
fitness chunks done in groups
variable length genes
"""
mutables = {'chromosome_length': float,
'mutation_rate': float,
'crossover_rate': float,
'kill_rate': float}
def __init__(self):
self.genes = list(TriangleGene() for _ in range(self.number_of_genes))
self.fitness = 0
    @staticmethod
    def fitness_of(arr):
        # compare against the reference image in float space so uint8 wrap-around
        # and channel mismatches (RGB vs RGBA) do not skew the score
        ref = np.asarray(im.convert("RGBA"), dtype=float)
        cand = np.asarray(arr.convert("RGBA"), dtype=float)
        diff = np.abs(ref - cand).mean()
        # numpy returns inf instead of raising on division by zero, so guard explicitly
        return 1 / diff if diff else maxsize
def crossover(self, other):
pass
organisms = list(Organism() for _ in range(Organism.initial_population_size))
def draw_org(organism):
img = Image.new("RGBA", size)
draw = ImageDraw.Draw(img)
for gene in organism.genes:
draw.polygon(gene.verts, gene.color.rgba)
return img
def draw_organisms(population, fn):
total_fitness = 0
for o in population:
img = draw_org(o)
fitness = o.fitness_of(img)
total_fitness += fitness
fn(id(o), fitness)
print(total_fitness, total_fitness / len(population))
draw_organisms(organisms, print)
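# A hedged sketch of a mutation step: the script defines Organism.mutation_rate
# but never applies it, so this helper is an illustration rather than the
# author's method. Each gene is re-randomized in place with probability `rate`.
def mutate(organism, rate=Organism.mutation_rate):
    for i, _gene in enumerate(organism.genes):
        if random() < rate:
            organism.genes[i] = TriangleGene()
    return organism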
|