max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score
---|---|---|---|---|---|---|
src/3D_plots.py | suryadheeshjith/episimmer | 0 | 12793651 | import sys
import ReadFile
import pickle
import World
import importlib.util
import os.path as osp
import policy_generator as pg
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator
import numpy as np
def module_from_file(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
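# Hedged usage sketch (illustrative only; the path below is hypothetical, not part of this script):
# module_from_file wraps importlib so an example directory's UserModel.py can be loaded dynamically, e.g.
#   UserModel = module_from_file("Generate_model", "examples/demo/UserModel.py")
#   model = UserModel.UserModel()
# This is exactly how get_model() below drives it with the real example_path.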
def get_example_path():
return sys.argv[1]
def get_config_path(path):
config_filepath=osp.join(path,'config.txt')
return config_filepath
def get_file_paths(example_path,config_obj):
# File Names
locations_filename=None
agents_filename=osp.join(example_path,config_obj.agents_filename)
interactions_FilesList_filename=osp.join(example_path,config_obj.interactions_files_list)
events_FilesList_filename=osp.join(example_path,config_obj.events_files_list)
if config_obj.locations_filename=="":
locations_filename=None
else:
locations_filename=osp.join(example_path,config_obj.locations_filename)
return agents_filename, interactions_FilesList_filename, events_FilesList_filename, locations_filename
def get_file_names_list(example_path,interactions_FilesList_filename,events_FilesList_filename,config_obj):
# Read a file (for interactions/events) that lists the file names containing interaction and event details for each time step
interactions_files_list=None
events_files_list=None
if config_obj.interactions_files_list=='':
print('No Interaction files uploaded!')
else:
interactionFiles_obj=ReadFile.ReadFilesList(interactions_FilesList_filename)
interactions_files_list=list(map(lambda x : osp.join(example_path,x) ,interactionFiles_obj.file_list))
if interactions_files_list==[]:
print('No Interactions inputted')
if config_obj.events_files_list=='':
print('No Event files uploaded!')
else:
eventFiles_obj=ReadFile.ReadFilesList(events_FilesList_filename)
events_files_list=list(map(lambda x : osp.join(example_path,x) ,eventFiles_obj.file_list))
if events_files_list==[]:
print('No Events inputted')
return interactions_files_list, events_files_list
def get_model(example_path):
UserModel = module_from_file("Generate_model", osp.join(example_path,'UserModel.py'))
model = UserModel.UserModel()
return model
def get_policy(example_path):
Generate_policy = module_from_file("Generate_policy", osp.join(example_path,'Generate_policy.py'))
policy_list, event_restriction_fn=Generate_policy.generate_policy()
return policy_list, event_restriction_fn
if __name__=="__main__":
example_path = get_example_path()
config_filename = get_config_path(example_path)
# Read Config file using ReadFile.ReadConfiguration
config_obj=ReadFile.ReadConfiguration(config_filename)
agents_filename, interactions_FilesList_filename,\
events_FilesList_filename, locations_filename = get_file_paths(example_path,config_obj)
interactions_files_list, events_files_list = get_file_names_list(example_path,interactions_FilesList_filename,events_FilesList_filename,config_obj)
# User Model
model = get_model(example_path)
# policy_list, event_restriction_fn=get_policy(example_path)
##########################################################################################
num_tests = 90
ntpa_max=6
napt_max=6
X=np.arange(1, napt_max+1, 1)
Y=np.arange(1, ntpa_max+1, 1)
X,Y = np.meshgrid(X,Y)
print(X)
print(Y)
data_list={'Infected':np.zeros((ntpa_max,napt_max)),'False Positives':np.zeros((ntpa_max,napt_max)),'Quarantined':np.zeros((ntpa_max,napt_max))}
for i in range(napt_max):
for j in range(ntpa_max):
policy_list, event_restriction_fn = pg.generate_group_testing_tests_policy(num_tests, i+1, j+1)
world_obj=World.World(config_obj,model,policy_list,event_restriction_fn,agents_filename,interactions_files_list,locations_filename,events_files_list)
tdict, total_infection, total_quarantined_days, wrongly_quarantined_days, total_test_cost = world_obj.simulate_worlds(plot=False)
data_list['Infected'][j][i]=total_infection
data_list['False Positives'][j][i]=world_obj.total_false_positives
data_list['Quarantined'][j][i]=total_quarantined_days
print(data_list)
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
surf = ax.plot_surface(X, Y, np.array(data_list['False Positives']), cmap=cm.coolwarm,linewidth=0, antialiased=False)
plt.xlabel("Number of Agents per testtube")
plt.ylabel("Number of testtubes per agent")
plt.title("Pool testing strategies vs total false positives")
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
surf = ax.plot_surface(X, Y, np.array(data_list['Infected']), cmap=cm.coolwarm,linewidth=0, antialiased=False)
plt.xlabel("Number of Agents per testtube")
plt.ylabel("Number of testtubes per agent")
plt.title("Pool testing strategies vs total infections")
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
surf = ax.plot_surface(X, Y, np.array(data_list['Quarantined']), cmap=cm.coolwarm,linewidth=0, antialiased=False)
plt.xlabel("Number of Agents per testtube")
plt.ylabel("Number of testtubes per agent")
plt.title("Pool testing strategies vs total quarantine")
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
###############################################################################################
| 2.140625 | 2 |
odor_env/__init__.py | chinaheyu/pydog_training | 0 | 12793652 | from gym.envs.registration import register
register(
id='OdorEnvA-v0',
entry_point='odor_env.odor_env:OdorEnvA'
)
register(
id='OdorEnvB-v0',
entry_point='odor_env.odor_env:OdorEnvB'
)
| 1.507813 | 2 |
support_functions.py | DataCentric1/ponpare | 2 | 12793653 | #########################################################################################################
# Description: Collection of support functions that'll be used often
#
#########################################################################################################
import numpy as np
#########################################################################################################
__author__ = 'DataCentric1'
__pass__ = 1
__fail__ = 0
# Returns number of lines in a file in a memory / time efficient way
def file_len(fname):
i = -1
with open(fname) as f:
for i, l in enumerate(f, 1):
pass
return i
# Save numpy array from .npy file to txt file
def save_npy_array_to_txt(npy_fname, txt_fname):
np.savetxt(txt_fname, np.load(npy_fname), fmt='%s')
return __pass__
# Save numpy array from .npy file to csv file. TODO - Double-check this function
def save_npy_array_to_csv(npy_fname, csv_fname):
temp_array = np.load(npy_fname)
index_row, index_col = temp_array.shape
print(index_row)
print(index_col)
f = open(csv_fname, 'w')
for i in range(index_row):
f.write(temp_array[i, 0])
f.write(",")
f.write(temp_array[i, 1])
f.write("\n")
f.close()
return __pass__
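# Note (design sketch, not part of the original module): for purely numeric or string
# arrays the same result can usually be obtained in a single call, assuming the default
# formatting is acceptable:
#   np.savetxt(csv_fname, np.load(npy_fname), fmt='%s', delimiter=',')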
| 2.875 | 3 |
ARMODServers/Apps/ARExperiences/migrations/0005_auto_20210411_1029.py | Phantomxm2021/ARMOD-Dashboard | 1 | 12793654 | <reponame>Phantomxm2021/ARMOD-Dashboard
# Generated by Django 3.1.4 on 2021-04-11 02:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ARExperiences', '0004_auto_20210411_1019'),
]
operations = [
migrations.RenameField(
model_name='arexperienceasset',
old_name='arexperience_id',
new_name='pid',
),
]
| 1.59375 | 2 |
pycgr/py_cgr_client.py | ewb4/HDTN | 6 | 12793655 | from py_cgr.py_cgr_lib.py_cgr_lib import *
import zmq
import time
import sys
import random
import json
import re
import getopt
argumentList = sys.argv[1:]
# Options
options = "hc"
try:
# Parsing argument
arguments, values = getopt.getopt(argumentList, options)
# checking each argument
for currentArgument, currentValue in arguments:
if currentArgument in ("-h"):
print ("Use the option -m to specify the contact plan file location ")
sys.exit(0)
elif currentArgument in ("-c"):
print ("Contact plan file :", sys.argv[2])
except getopt.error as err:
# output error, and return with an error code
print (str(err))
port = "4555"
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.bind("tcp://127.0.0.1:%s" % port) #localhost caused error
contact_plan = cp_load(sys.argv[2], 5000)
#contact_plan = cp_load('module/scheduler/src/contactPlan.json', 5000)
curr_time = 0
while True:
msg = socket.recv()
print("message received by server")
splitMessage = re.findall('[0-9]+', msg.decode('utf-8'))
splitMessage = list(filter(None, splitMessage))
sourceId = int(splitMessage[0])
destinationId = int(splitMessage[1])
startTime = int(splitMessage[2])
root_contact = Contact(sourceId, sourceId, 0, sys.maxsize, 100, 1, 0)
root_contact.arrival_time = startTime
route = cgr_dijkstra(root_contact, destinationId, contact_plan)
print("***Here's the route")
print(route)
print("***Sending next hop: " + str(route.next_node))
socket.send_string(str(route.next_node))
time.sleep(1)
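# Hedged note on the expected message format (inferred from the parsing above, not from
# project documentation): the loop extracts the first three integers found in the incoming
# string as sourceId, destinationId and startTime, so a peer could send, for example,
#   socket.send_string("source:1 dest:4 start:0")
# and the reply would be the next-hop node id computed by cgr_dijkstra.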
| 2.484375 | 2 |
deployml/keras/load/base.py | deploy-ml/deploy-ml | 18 | 12793656 | <reponame>deploy-ml/deploy-ml
from keras.preprocessing.image import img_to_array
import numpy as np
import pickle
import keras
import sys
import cv2
class KerasLoader:
def __init__(self, file_path):
if file_path[-3:] == "sav":
package = pickle.load(open(file_path, "rb"))
else:
print("error wrong format no 'sav' found")
package = None
self.model = package["model"]
self.convolutional = package["convolutional"]
if self.convolutional:
self.dims_one, self.dims_two = package["image dims"]
self.scaling_tool = package["scaler"]
if self.scaling_tool:
self.scaled_inputs = True
else:
self.scaled_inputs = False
if package["system version"] != str(sys.version):
print("warning! model was trained in {}. You're running {}".format(package["system version"],
str(sys.version)))
if package["Keras Version"] != str(keras.__version__):
print("warning! model was trained on {}. You're running {}".format(package["Keras Version"],
str(keras.__version__)))
def calculate(self, input_array=None, happening=True, override=False, image=None):
"""
Calculates probability of outcome
:param input_array: array of inputs (should be same order as training data)
:param happening: if set False, returns probability of event not happening
:param override: set to True if you want to override scaling
:param image: image object that has been read
:return: float between 0 and 1
"""
if self.convolutional:
image = cv2.resize(image, (self.dims_one, self.dims_two))
image = image.astype("float") / 255.0
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
return self.model.predict(image)[0][0]
else:
if self.scaled_inputs and not override:
input_array = self.scaling_tool.transform(input_array)
if happening:
return self.model.predict([input_array])[0][0]
else:
return self.model.predict([input_array])[0][0] | 2.625 | 3 |
ex075.py | erikamaylim/Python-CursoemVideo | 0 | 12793657 | <filename>ex075.py<gh_stars>0
"""Desenvolva um programa que leia quatro valores pelo teclado e guarde-os em uma tupla.
No final, mostre:
A) Quantas vezes apareceu o valor 9.
B) Em que posição foi digitado o primeiro valor 3.
C) Quais foram os números pares."""
tuplanum = (int(input('1º valor: ')),
int(input('2º valor: ')),
int(input('3º valor: ')),
int(input('4º valor: ')))
print('Números digitados:', end=' ')
for n in tuplanum:
print(n, end=' ')
print(f'\nO nº 9 apareceu {tuplanum.count(9)} vez(es)')
if 3 not in tuplanum:
print('O nº 3 não apareceu em nenhuma posição')
else:
print(f'O valor 3 apareceu na {tuplanum.index(3) + 1}ª posição')
print('Valores pares digitados:', end=' ')
for c in tuplanum:
if c % 2 == 0:
print(c, end=' ')
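# Illustrative run (hypothetical input, not part of the original exercise): entering
# 9, 3, 4 and 9 would report the value 9 appearing 2 times, the first 3 at position 2,
# and 4 as the only even value entered.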
| 3.71875 | 4 |
ja/code_snippets/results/result.api-tags-update.py | quotecenter/documentation-1 | 0 | 12793658 | {'host': 'hostname', 'tags': ['role:database','environment:test']}
| 1.148438 | 1 |
external/metadata/core.py | jtackaberry/stagehand | 8 | 12793659 | # -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# core.py
# -----------------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------------
# kaa-Metadata - Media Metadata for Python
# Copyright (C) 2003-2006 <NAME>, <NAME>
#
# First Edition: <NAME> <<EMAIL>>
# Maintainer: <NAME> <<EMAIL>>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
# python imports
import re
import logging
from . import fourcc
from . import language
from . import utils
UNPRINTABLE_KEYS = [ 'thumbnail', 'url', 'codec_private' ]
# media type definitions
MEDIA_AUDIO = 'MEDIA_AUDIO'
MEDIA_VIDEO = 'MEDIA_VIDEO'
MEDIA_IMAGE = 'MEDIA_IMAGE'
MEDIA_AV = 'MEDIA_AV'
MEDIA_SUBTITLE = 'MEDIA_SUBTITLE'
MEDIA_CHAPTER = 'MEDIA_CHAPTER'
MEDIA_DIRECTORY = 'MEDIA_DIRECTORY'
MEDIA_DISC = 'MEDIA_DISC'
MEDIA_GAME = 'MEDIA_GAME'
MEDIACORE = ['title', 'caption', 'comment', 'size', 'type', 'subtype', 'timestamp',
'keywords', 'country', 'language', 'langcode', 'url', 'media', 'artist',
'mime', 'datetime', 'tags', 'hash']
EXTENSION_DEVICE = 'device'
EXTENSION_DIRECTORY = 'directory'
EXTENSION_STREAM = 'stream'
# get logging object
log = logging.getLogger('metadata')
class ParseError(Exception):
pass
_features = {
# Guess if a file is a recording of a TV series. It matches names in the
# style of 'series 1x01 episode' and 'series s1e01 episode' where the
# delimiter may not be a space but also point or minus.
'VIDEO_SERIES_PARSER':
[ False, '(.+?)[\. _-]+[sS]?([0-9]|[0-9][0-9])[xeE]([0-9]|[0-9][0-9])[\. _-]+(.+)' ]
}
def enable_feature(var, value=None):
"""
Enable optional features defined in the _feature variable. Some
feature have a value. These values are set to reasonable default
values but can be overwritten by setting the optional parameter
value.
"""
_features[var][0] = True
if value:
_features[var][1] = value
def features():
"""
List all optional features
"""
return list(_features.keys())
def feature_enabled(feature):
"""
Returns if a feature was activated
"""
return _features[feature][0]
def feature_config(feature):
"""
Returns the configuration of the given feature
"""
return _features[feature][1]
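# Hedged usage sketch (illustrative only): optional features are toggled through the
# helpers above, e.g. enabling the TV-series filename parser with its default regular
# expression, or overriding that pattern (the alternative pattern below is hypothetical):
#   enable_feature('VIDEO_SERIES_PARSER')
#   enable_feature('VIDEO_SERIES_PARSER', r'(.+?) s(\d+)e(\d+) (.+)')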
class Media(object):
media = None
"""
Media is the base class to all Media Metadata Containers. It defines
the basic structures that handle metadata. Media and its derivates
contain a common set of metadata attributes that is listed in keys.
Specific derivates contain additional keys to the dublin core set that is
defined in Media.
"""
_keys = MEDIACORE
table_mapping = {}
def __init__(self, hash=None):
if hash is not None:
# create Media based on dict
for key, value in list(hash.items()):
if isinstance(value, list) and value and isinstance(value[0], dict):
value = [ Media(x) for x in value ]
self._set(key, value)
return
self._keys = self._keys[:]
self.tables = {}
# Tags, unlike tables, are more well-defined dicts whose values are
# either Tag objects, other dicts (for nested tags), or lists of either
# (for multiple instances of the tag, e.g. actor). Where possible,
# parsers should transform tag names to conform to the Official
# Matroska tags defined at http://www.matroska.org/technical/specs/tagging/index.html
# All tag names will be lower-cased.
self.tags = Tags()
for key in self._keys:
if key not in ('media', 'tags'):
setattr(self, key, None)
#
# unicode and string convertion for debugging
#
def __str__(self):
result = ''
# print normal attributes
lists = []
for key in self._keys:
value = getattr(self, key, None)
if value == None or key == 'url':
continue
if isinstance(value, list):
if not value:
continue
elif isinstance(value[0], str):
# Just a list of strings (keywords?), so don't treat it specially.
value = ', '.join(value)
else:
lists.append((key, value))
continue
elif isinstance(value, dict):
# Tables or tags treated separately.
continue
if key in UNPRINTABLE_KEYS:
value = '<unprintable data, size=%d>' % len(value)
result += '| %10s: %s\n' % (str(key), str(value))
# print tags (recursively, to support nested tags).
def print_tags(tags, suffix, show_label):
result = ''
for n, (name, tag) in enumerate(tags.items()):
result += '| %12s%s%s = ' % ('tags: ' if n == 0 and show_label else '', suffix, name)
if isinstance(tag, list):
# TODO: doesn't support lists/dicts within lists.
result += '%s\n' % ', '.join(subtag.value for subtag in tag)
else:
result += '%s\n' % (tag.value or '')
if isinstance(tag, dict):
result += print_tags(tag, ' ', False)
return result
result += print_tags(self.tags, '', True)
# print lists
for key, l in lists:
for n, item in enumerate(l):
label = '+-- ' + key.rstrip('s').capitalize()
if key not in ('tracks', 'subtitles', 'chapters'):
label += ' Track'
result += '%s #%d\n' % (label, n+1)
result += '| ' + re.sub(r'\n(.)', r'\n| \1', str(item))
# print tables
if log.level >= 10:
for name, table in list(self.tables.items()):
result += '+-- Table %s\n' % str(name)
for key, value in list(table.items()):
try:
value = str(value)
if len(value) > 50:
value = '<unprintable data, size=%d>' % len(value)
except (UnicodeDecodeError, TypeError) as e:
try:
value = '<unprintable data, size=%d>' % len(value)
except AttributeError:
value = '<unprintable data>'
result += '| | %s: %s\n' % (str(key), value)
return result
def __repr__(self):
if hasattr(self, 'url'):
return '<%s %s>' % (str(self.__class__)[8:-2], self.url)
else:
return '<%s>' % (str(self.__class__)[8:-2])
#
# internal functions
#
def _appendtable(self, name, hashmap):
"""
Appends a tables of additional metadata to the Object.
If such a table already exists, the given tables items are
added to the existing one.
"""
if name not in self.tables:
self.tables[name] = hashmap
else:
# Append to the already existing table
for k in list(hashmap.keys()):
self.tables[name][k] = hashmap[k]
def _set(self, key, value):
"""
Set key to value and add the key to the internal keys list if
missing.
"""
if value is None and getattr(self, key, None) is None:
return
if isinstance(value, str):
value = utils.tostr(value)
setattr(self, key, value)
if not key in self._keys:
self._keys.append(key)
def _set_url(self, url):
"""
Set the URL of the source
"""
self.url = url
def _finalize(self):
"""
Correct same data based on specific rules
"""
# make sure all strings are unicode
for key in self._keys:
if key in UNPRINTABLE_KEYS:
continue
value = getattr(self, key)
if value is None:
continue
if key == 'image':
if isinstance(value, str):
setattr(self, key, utils.tobytes(value))
continue
if isinstance(value, str):
setattr(self, key, utils.tostr(value))
if isinstance(value, str):
setattr(self, key, value.strip().rstrip().replace('\0', ''))
if isinstance(value, list) and value and isinstance(value[0], Media):
for submenu in value:
submenu._finalize()
# copy needed tags from tables
for name, table in list(self.tables.items()):
mapping = self.table_mapping.get(name, {})
for tag, attr in list(mapping.items()):
if self.get(attr):
continue
value = table.get(tag, None)
if value is not None:
if not isinstance(value, str):
value = utils.tostr(str(value))
elif isinstance(value, str):
value = utils.tostr(value)
value = value.strip().rstrip().replace('\0', '')
setattr(self, attr, value)
if 'fourcc' in self._keys and 'codec' in self._keys and self.codec is not None:
# Codec may be a fourcc, in which case we resolve it to its actual
# name and set the fourcc attribute.
self.fourcc, self.codec = fourcc.resolve(self.codec)
if 'language' in self._keys:
self.langcode, self.language = language.resolve(self.language)
#
# data access
#
def __contains__(self, key):
"""
Test if key exists in the dict
"""
return hasattr(self, key)
def get(self, attr, default = None):
"""
Returns the given attribute. If the attribute is not set by
the parser return 'default'.
"""
return getattr(self, attr, default)
def __getitem__(self, attr):
"""
Get the value of the given attribute
"""
return getattr(self, attr, None)
def __setitem__(self, key, value):
"""
Set the value of 'key' to 'value'
"""
setattr(self, key, value)
def has_key(self, key):
"""
Check if the object has an attribute 'key'
"""
return hasattr(self, key)
def convert(self):
"""
Convert Media to dict.
"""
result = {}
for k in self._keys:
value = getattr(self, k, None)
if isinstance(value, list) and value and isinstance(value[0], Media):
value = [ x.convert() for x in value ]
result[k] = value
return result
def keys(self):
"""
Return all keys for the attributes set by the parser.
"""
return self._keys
class Collection(Media):
"""
Collection of Digial Media like CD, DVD, Directory, Playlist
"""
_keys = Media._keys + [ 'id', 'tracks' ]
def __init__(self):
Media.__init__(self)
self.tracks = []
class Tag(object):
"""
An individual tag, which will be a value stored in a Tags object.
Tag values are strings (for binary data), unicode objects, or datetime
objects for tags that represent dates or times.
"""
def __init__(self, value=None, langcode='und', binary=False):
super(Tag, self).__init__()
self.value = value
self.langcode = langcode
self.binary = binary
def __unicode__(self):
return str(self.value)
def __str__(self):
return str(self.value)
def __repr__(self):
if not self.binary:
return '<Tag object: %s>' % repr(self.value)
else:
return '<Binary Tag object: size=%d>' % len(self.value)
@property
def langcode(self):
return self._langcode
@langcode.setter
def langcode(self, code):
self._langcode, self.language = language.resolve(code)
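# Hedged usage sketch (illustrative only): a Tag stores a value plus a language code
# that is resolved through the language module, e.g.
#   rating = Tag('PG', langcode='eng')
#   rating.language   # resolved long form of 'eng'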
class Tags(dict, Tag):
"""
A dictionary containing Tag objects. Values can be other Tags objects
(for nested tags), lists, or Tag objects.
A Tags object is more or less a dictionary but it also contains a value.
This is necessary in order to represent this kind of tag specification
(e.g. for Matroska)::
<Simple>
<Name>LAW_RATING</Name>
<String>PG</String>
<Simple>
<Name>COUNTRY</Name>
<String>US</String>
</Simple>
</Simple>
The attribute RATING has a value (PG), but it also has a child tag
COUNTRY that specifies the country code the rating belongs to.
"""
def __init__(self, value=None, langcode='und', binary=False):
super(Tags, self).__init__()
self.value = value
self.langcode = langcode
self.binary = False
| 1.625 | 2 |
sacrerouge/tests/fixtures/data/simetrix/setup.py | danieldeutsch/decomposed-rouge | 81 | 12793660 | <reponame>danieldeutsch/decomposed-rouge<gh_stars>10-100
"""
This script parses the sample data and output from the SIMetrix package and
saves it for unit testing.
"""
import argparse
import sys
from glob import glob
sys.path.append('.')
from sacrerouge.io import JsonlWriter # noqa
def load_sample_documents(input_dir: str):
documents = {}
for instance_id in ['input1', 'input2', 'input3']:
documents[instance_id] = []
for file_path in glob(f'{input_dir}/{instance_id}/*.txt'):
lines = open(file_path, 'r').read().splitlines()
sentences = []
for line in lines:
if line:
sentences.append(line)
documents[instance_id].append(sentences)
return documents
def load_sample_summaries(input_dir: str):
summaries = {}
for instance_id in ['input1', 'input2', 'input3']:
summaries[instance_id] = {}
for summarizer_id in ['fb', 'me', 'nb', 'rb', 'ts']:
summaries[instance_id][summarizer_id] = open(f'{input_dir}/{instance_id}.{summarizer_id}.txt', 'r').read().splitlines()
return summaries
def save_instances(documents, summaries, file_path):
with JsonlWriter(file_path) as out:
for instance_id in ['input1', 'input2', 'input3']:
for summarizer_id in ['fb', 'me', 'nb', 'rb', 'ts']:
docs = documents[instance_id]
summary = summaries[instance_id][summarizer_id]
out.write({
'instance_id': instance_id,
'summarizer_id': summarizer_id,
'documents': docs,
'summary': summary
})
def load_expected_summary_level_output(file_path: str):
metrics_list = []
lines = open(file_path, 'r').read().splitlines()
for i, line in enumerate(lines):
columns = line.split()
if i == 0:
header = columns
else:
metrics = {
'instance_id': columns[0],
'summarizer_id': columns[1],
'metrics': {}
}
for j, metric_name in enumerate(header[2:]):
metrics['metrics'][metric_name] = float(columns[j + 2])
metrics_list.append(metrics)
return metrics_list
def load_expected_system_level_output(file_path: str):
metrics_list = []
lines = open(file_path, 'r').read().splitlines()
for i, line in enumerate(lines):
columns = line.split()
if i == 0:
header = columns
else:
metrics = {
'summarizer_id': columns[0],
'metrics': {}
}
for j, metric_name in enumerate(header[1:]):
metrics['metrics'][metric_name] = float(columns[j + 1])
metrics_list.append(metrics)
return metrics_list
def save_metrics(metrics_list, file_path):
with JsonlWriter(file_path) as out:
for metrics in metrics_list:
out.write(metrics)
def main(args):
documents = load_sample_documents(f'{args.sample_eval_dir}/sampleInputs')
summaries = load_sample_summaries(f'{args.sample_eval_dir}/sampleSummaries')
save_instances(documents, summaries, f'{args.output_dir}/instances.jsonl')
summary_level = load_expected_summary_level_output(f'{args.sample_eval_dir}/sampleMappings.txt.ieval.micro')
system_level = load_expected_system_level_output(f'{args.sample_eval_dir}/sampleMappings.txt.ieval.macro')
save_metrics(summary_level, f'{args.output_dir}/metrics.summary-level.jsonl')
save_metrics(system_level, f'{args.output_dir}/metrics.system-level.jsonl')
if __name__ == '__main__':
argp = argparse.ArgumentParser()
argp.add_argument('sample_eval_dir')
argp.add_argument('output_dir')
args = argp.parse_args()
main(args)
| 2.5 | 2 |
atomtools/__init__.py | atomse/atomtools | 0 | 12793661 | <reponame>atomse/atomtools<filename>atomtools/__init__.py<gh_stars>0
"""
independent chemical symbols
"""
__version__ = '1.9.4'
def version():
return __version__
| 1.21875 | 1 |
src/python/pcmdi/misc.py | gleckler1/pcmdi_metrics | 0 | 12793662 | <gh_stars>0
import cdms2 as cdms
import string, os
import ESMP
#### MAKE DIRECTORY
def mkdir_fcn(path):
try:
os.mkdir(path)
except:
pass
return
#### GET INHOUSE DATA THAT HAS BEEN TRANSFORMED/INTERPOLATED
def get_our_model_clim(data_location,var):
pd = data_location
# if var in ['tos','sos','zos']: pd = string.replace(pd,'atm.Amon','ocn.Omon')
f = cdms.open(pd)
try:
dm = f(var + '_ac')
except:
dm = f(var)
f.close()
return dm
#### GET CMIP5 DATA
def get_cmip5_model_clim(data_location,model_version, var):
lst = os.popen('ls ' + data_location + '*' + model_version + '*' + var + '.*.nc').readlines()
pd = lst[0][:-1] #data_location
# if var in ['tos','sos','zos']: pd = string.replace(pd,'atm.Amon','ocn.Omon')
f = cdms.open(pd)
try:
dm = f(var + '_ac')
except:
dm = f(var)
f.close()
print(pd)
return dm
########################################################################
#### OUTPUT MODEL CLIMATOLOGY DATA
def output_model_clims(dm,var,Tdir,F, model_version, targetGrid):
pathout = Tdir()
try:
os.mkdir(pathout)
except:
pass
F.variable = var
F.model_version = model_version
nm = F()
nm = string.replace(nm,'.nc','.' + targetGrid + '.nc')
dm.id = var
g = cdms.open(pathout + '/' + nm,'w+')
g.write(dm)
g.close()
def model_output_structure(dir_template, file_template, model_version, variable):
dir_template = "%(root_modeling_group_clim_directory)/%(test_case)/"
### CONSTRUCT PATH
D=genutil.StringConstructor(dir_template)
D.root_modeling_group_clim_directory = mod_data_path
D.test_case = test_case
data_location = D()
### CONSTRUCT FILENAME
F = genutil.StringConstructor(file_template)
F.model_version = model_version
F.table_realm = 'atm.Amon'
if variable in ['tos','sos','zos']: F.table_realm = 'ocn.Omon'
F.variable = variable
F.ext='nc'
F.period = '1980-2005'
filename = F()
return data_location,filename
def output_interpolated_model_data(dm, var, targetGrid,regrid_method,model_output_location):
model_output_location = string.replace(model_output_location,'.nc','.' + regrid_method + '.' + targetGrid + '.nc')
g = cdms.open(model_output_location,'w+')
dm.id = var
g.write(dm)
g.close()
| 2.015625 | 2 |
AtCoder/ABC/150/A.500 Yen Coins.py | shinkeonkim/today-ps | 2 | 12793663 | K,X=map(int,input().split())
print("Yes" if K*500>=X else "No" ) | 3.109375 | 3 |
training.py | zhangsiyu1103/ESNAC | 0 | 12793664 | <reponame>zhangsiyu1103/ESNAC
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import options as opt
import os
import time
import sys
import numpy as np
def init_model(model):
for module in model.modules():
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_out',
nonlinearity='relu')
if module.bias is not None:
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.BatchNorm2d):
nn.init.constant_(module.weight, 1)
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, 0, 0.01)
nn.init.constant_(module.bias, 0)
return model
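# Hedged usage sketch (illustrative only): init_model re-initializes weights in place
# (Kaiming for conv layers, constants for batch norm, normal for linear), which is what
# the from_scratch branches below rely on, e.g.
#   model = init_model(torchvision.models.resnet18())
# (torchvision is only an assumption here; any nn.Module works.)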
def test_model(model, dataset):
model.eval()
correct = 0
total = 0
loader = None
if hasattr(dataset, 'test_loader'):
loader = dataset.test_loader
elif hasattr(dataset, 'val_loader'):
loader = dataset.val_loader
else:
raise NotImplementedError('Unknown dataset!')
train_loader = dataset.train_loader
train_correct = 0
train_total = 0
best_train_acc = 0
#print(len(loader))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(train_loader):
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
outputs = model(inputs)
_, predicted = outputs.max(1)
train_total += targets.size(0)
train_correct += predicted.eq(targets).sum().item()
train_acc = 100.0 * train_correct / train_total
#print("train acc:", train_acc)
#if train_acc > best_train_acc:
# best_train_acc = train_acc
# model_best = model.module
# torch.save(model_best, 'temp_save/temp.pth')
#if train_acc == 100:
# torch.save(model.mudule, 'temp_save/base.pth')
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(loader):
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
outputs = model(inputs)
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
acc = 100.0 * correct / total
return acc
def test_model_regression(model, dataset):
model.eval()
loader = None
loss_total = 0
batch_cnt = 0
criterion = nn.MSELoss()
if hasattr(dataset, 'test_loader'):
loader = dataset.test_loader
elif hasattr(dataset, 'val_loader'):
loader = dataset.val_loader
else:
raise NotImplementedError('Unknown dataset!')
#loader = dataset.train_loader
#print(len(loader))
train_loader = dataset.train_loader
train_loss_total = 0
train_batch_cnt = 0
#print(len(loader))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(train_loader):
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
outputs = model(inputs)
train_loss = criterion(outputs,targets)
train_loss_total+=train_loss.item()
train_batch_cnt += 1
train_loss_avg = train_loss_total / train_batch_cnt
print("train loss:", train_loss_avg)
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(loader):
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
outputs = model(inputs)
loss = criterion(outputs,targets)
loss_total+=loss.item()
batch_cnt += 1
#_, predicted = outputs.max(1)
#total += targets.size(0)
#correct += predicted.eq(targets).sum().item()
#acc = 100.0 * correct / total
return loss_total/batch_cnt
def test_model_image(model, dataset):
model.eval()
correct = 0
total = 0
loader = None
if hasattr(dataset, 'test_loader'):
loader = dataset.test_loader
elif hasattr(dataset, 'val_loader'):
loader = dataset.val_loader
else:
raise NotImplementedError('Unknown dataset!')
miss = set()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(loader):
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
outputs = model(inputs)
_, predicted = outputs.max(1)
total += targets.size(0)
equal_v = predicted.eq(targets)
correct += equal_v.sum().item()
j = 0
#print(equal_v
for i in equal_v:
if not i:
miss.add(inputs[j])
#print(inputs[j])
j+=1
acc = 100.0 * correct / total
return acc, miss
def test_model_latency(model, dataset):
model.eval()
loader = None
if hasattr(dataset, 'test_loader'):
loader = dataset.test_loader
elif hasattr(dataset, 'val_loader'):
loader = dataset.val_loader
else:
raise NotImplementedError('Unknown dataset!')
latency = []
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(loader):
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
start_time = time.time()
outputs = model(inputs)
torch.cuda.synchronize()
time_taken = time.time() - start_time
latency.append(time_taken * 1000)
lat = np.mean(latency)
return lat
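# Note (descriptive, not part of the original file): torch.cuda.synchronize() is called
# before reading the clock so that asynchronous GPU kernels launched by model(inputs)
# have finished; without it the measured per-batch latency (in ms) would mostly reflect
# kernel launch time rather than actual inference time.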
def train_model_teacher(model_, dataset, save_path, epochs=60, lr=0.005,
momentum=0.9, weight_decay=5e-4):
acc_best = 0
model_best = None
model = torch.nn.DataParallel(model_.to(opt.device))
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=lr,
weight_decay=weight_decay)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
for i in range(1, epochs + 1):
print('epochs ', i)
model.train()
loss_total = 0
batch_cnt = 0
for batch_idx, (inputs, targets) in enumerate(dataset.train_loader):
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
loss_total += loss.item()
batch_cnt += 1
scheduler.step()
opt.writer.add_scalar('training/loss', loss_total / batch_cnt, i)
acc = test_model(model, dataset)
opt.writer.add_scalar('training/acc', acc, i)
print("loss: ", loss_total/batch_cnt)
print("test acc: ", acc)
if acc > acc_best:
acc_best = acc
model.module.acc = acc
model_best = model.module
torch.save(model_best, save_path)
return model_best, acc_best
def train_model_student(model_, dataset, save_path, idx,
optimization=opt.tr_fu_optimization,
epochs=opt.tr_fu_epochs, lr=opt.tr_fu_lr,
momentum=opt.tr_fu_momentum,
weight_decay=opt.tr_fu_weight_decay,
lr_schedule=opt.tr_fu_lr_schedule,
from_scratch=opt.tr_fu_from_scratch):
acc_best = 0
best_train_acc = 0
model_best = None
model = torch.nn.DataParallel(model_.to(opt.device))
criterion = nn.CrossEntropyLoss()
if optimization == 'SGD':
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum,
weight_decay=weight_decay)
elif optimization == 'Adam':
optimizer = optim.Adam(model.parameters(), lr=lr,
weight_decay=weight_decay)
if lr_schedule == 'step':
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20,
gamma=0.2)
elif lr_schedule == 'linear':
batch_cnt = len(dataset.train_loader)
n_total_exp = epochs * batch_cnt
lr_lambda = lambda n_exp_seen: 1 - n_exp_seen/n_total_exp
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
if from_scratch:
init_model(model)
for i in range(1, epochs + 1):
print('epoch',i)
model.train()
#if lr_schedule == 'step':
# scheduler.step()
loss_total = 0
batch_cnt = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(dataset.train_loader):
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
optimizer.zero_grad()
outputs = model(inputs)
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
#if lr_schedule == 'linear':
loss_total += loss.item()
batch_cnt += 1
scheduler.step()
opt.writer.add_scalar('training_%d/loss' % (idx), loss_total / batch_cnt, i)
acc = test_model(model, dataset)
opt.writer.add_scalar('training_%d/acc' % (idx), acc, i)
print('loss: ', loss_total/batch_cnt)
print('acc: ',acc)
if acc > acc_best:
acc_best = acc
model.module.acc = acc
model_best = model.module
torch.save(model_best, save_path)
return model_best, acc_best
def train_model_student_regression(model_, dataset, save_path, idx,
optimization=opt.tr_fu_optimization,
epochs=opt.tr_fu_epochs, lr=opt.tr_fu_lr,
momentum=opt.tr_fu_momentum,
weight_decay=opt.tr_fu_weight_decay,
lr_schedule=opt.tr_fu_lr_schedule,
from_scratch=opt.tr_fu_from_scratch):
loss_best = sys.maxsize
model_best = None
model = torch.nn.DataParallel(model_.to(opt.device))
criterion = nn.MSELoss()
if optimization == 'SGD':
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum,
weight_decay=weight_decay)
elif optimization == 'Adam':
optimizer = optim.Adam(model.parameters(), lr=lr,
weight_decay=weight_decay)
if lr_schedule == 'step':
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10,
gamma=0.4)
elif lr_schedule == 'linear':
batch_cnt = len(dataset.train_loader)
n_total_exp = epochs * batch_cnt
lr_lambda = lambda n_exp_seen: 1 - n_exp_seen/n_total_exp
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
if from_scratch:
init_model(model)
for i in range(1, epochs + 1):
print('epoch',i)
model.train()
#if lr_schedule == 'step':
# scheduler.step()
loss_total = 0
batch_cnt = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(dataset.train_loader):
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
optimizer.zero_grad()
outputs = model(inputs)
#i_, predicted = outputs.max(1)
#total += targets.size(0)
#correct += predicted.eq(targets).sum().item()
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
#if lr_schedule == 'linear':
loss_total += loss.item()
batch_cnt += 1
#print("train acc: ", 100*correct/total)
scheduler.step()
opt.writer.add_scalar('training_%d/loss' % (idx), loss_total/batch_cnt , i)
test_loss = test_model_regression(model, dataset)
#opt.writer.add_scalar('training_%d/acc' % (idx), acc, i)
print('train loss: ', loss_total/batch_cnt)
print('test loss: ',test_loss)
if test_loss < loss_best:
loss_best = test_loss
model.module.loss = test_loss
model_best = model.module
torch.save(model_best, save_path)
return model_best, loss_best
def train_model_student_kd(teacher_, model_, dataset, save_path, idx,
optimization=opt.tr_fu_optimization,
epochs=opt.tr_fu_epochs, lr=opt.tr_fu_lr,
momentum=opt.tr_fu_momentum,
weight_decay=opt.tr_fu_weight_decay,
lr_schedule=opt.tr_fu_lr_schedule,
from_scratch=opt.tr_fu_from_scratch):
acc_best = 0
model_best = None
model = torch.nn.DataParallel(model_.to(opt.device))
teacher = torch.nn.DataParallel(teacher_.to(opt.device))
criterion1 = nn.CrossEntropyLoss()
criterion2 = nn.MSELoss()
if optimization == 'SGD':
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum,
weight_decay=weight_decay)
elif optimization == 'Adam':
optimizer = optim.Adam(model.parameters(), lr=lr,
weight_decay=weight_decay)
if lr_schedule == 'step':
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100,
gamma=0.1)
elif lr_schedule == 'linear':
batch_cnt = len(dataset.train_loader)
n_total_exp = epochs * batch_cnt
lr_lambda = lambda n_exp_seen: 1 - n_exp_seen/n_total_exp
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
if from_scratch:
init_model(model)
for i in range(1, epochs + 1):
model.train()
#if lr_schedule == 'step':
# scheduler.step()
loss_total = 0
batch_cnt = 0
for batch_idx, (inputs, targets) in enumerate(dataset.train_loader):
teacher_outputs = None
with torch.no_grad():
teacher_outputs = teacher(inputs)
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
optimizer.zero_grad()
outputs = model(inputs)
loss1 = criterion1(outputs, targets)
loss2 = criterion2(outputs, teacher_outputs)
loss = loss1 + loss2
loss.backward()
optimizer.step()
#if lr_schedule == 'linear':
scheduler.step()
loss_total += loss.item()
batch_cnt += 1
opt.writer.add_scalar('training_%d/loss' % (idx), loss_total / batch_cnt, i)
acc = test_model(model, dataset)
opt.writer.add_scalar('training_%d/acc' % (idx), acc, i)
#print('loss: ', loss_total/batch_cnt)
#print('acc: ',acc)
if acc > acc_best:
acc_best = acc
model.module.acc = acc
model_best = model.module
torch.save(model_best, save_path)
return model_best, acc_best
def train_model_student_kd_reg(teacher_, model_, dataset, save_path, idx,
optimization=opt.tr_fu_optimization,
epochs=opt.tr_fu_epochs, lr=opt.tr_fu_lr,
momentum=opt.tr_fu_momentum,
weight_decay=opt.tr_fu_weight_decay,
lr_schedule=opt.tr_fu_lr_schedule,
from_scratch=opt.tr_fu_from_scratch):
loss_best = sys.maxsize
model_best = None
model = torch.nn.DataParallel(model_.to(opt.device))
teacher = torch.nn.DataParallel(teacher_.to(opt.device))
criterion = nn.MSELoss()
if optimization == 'SGD':
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum,
weight_decay=weight_decay)
elif optimization == 'Adam':
optimizer = optim.Adam(model.parameters(), lr=lr,
weight_decay=weight_decay)
if lr_schedule == 'step':
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100,
gamma=0.1)
elif lr_schedule == 'linear':
batch_cnt = len(dataset.train_loader)
n_total_exp = epochs * batch_cnt
lr_lambda = lambda n_exp_seen: 1 - n_exp_seen/n_total_exp
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
if from_scratch:
init_model(model)
for i in range(1, epochs + 1):
model.train()
#if lr_schedule == 'step':
# scheduler.step()
loss_total = 0
#batch_cnt = 0
for batch_idx, (inputs, targets) in enumerate(dataset.train_loader):
teacher_outputs = None
with torch.no_grad():
teacher_outputs = teacher(inputs)
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
optimizer.zero_grad()
outputs = model(inputs)
loss1 = criterion(outputs, targets)
loss2 = criterion(outputs, teacher_outputs)
loss = loss1 + loss2
loss.backward()
optimizer.step()
#if lr_schedule == 'linear':
scheduler.step()
loss_total += loss.item()
#batch_cnt += 1
opt.writer.add_scalar('training_%d/loss' % (idx), loss_total, i)
test_loss = test_model_regression(model, dataset)
#opt.writer.add_scalar('training_%d/acc' % (idx), acc, i)
#print('loss: ', loss_total/batch_cnt)
#print('acc: ',acc)
if test_loss < loss_best:
loss_best = test_loss
model.module.loss = test_loss
model_best = model.module
torch.save(model_best, save_path)
return model_best, loss_best
def train_model_search(teacher_, students_, dataset,
optimization=opt.tr_se_optimization,
epochs=opt.tr_se_epochs, lr=opt.tr_se_lr,
momentum=opt.tr_se_momentum,
weight_decay=opt.tr_se_weight_decay,
lr_schedule=opt.tr_se_lr_schedule,
loss_criterion=opt.tr_se_loss_criterion):
n = len(students_)
accs_best = [0.0] * n
students_best = [None] * n
teacher = torch.nn.DataParallel(teacher_.to(opt.device))
students = [None] * n
for j in range(n):
students[j] = torch.nn.DataParallel(students_[j].to(opt.device))
if loss_criterion == 'KD':
criterion = nn.MSELoss()
elif loss_criterion == 'CE':
criterion = nn.CrossEntropyLoss()
if optimization == 'SGD':
optimizers = [optim.SGD(students[j].parameters(), lr=lr,
momentum=momentum, weight_decay=weight_decay)
for j in range(n)]
elif optimization == 'Adam':
optimizers = [optim.Adam(students[j].parameters(), lr=lr,
weight_decay=weight_decay) for j in range(n)]
if lr_schedule == 'linear':
batch_cnt = len(dataset.train_loader)
n_total_exp = epochs * batch_cnt
lr_lambda = lambda n_exp_seen: 1 - n_exp_seen/n_total_exp
schedulers = [optim.lr_scheduler.LambdaLR(optimizers[j],
lr_lambda=lr_lambda)
for j in range(n)]
for i in range(1, epochs + 1):
print("epochs:",i)
teacher.eval()
for j in range(n):
students[j].train()
loss_total = [0.0] * n
batch_cnt = 0
for batch_idx, (inputs, targets) in enumerate(dataset.train_loader):
inputs = inputs.to(opt.device)
if loss_criterion == 'KD':
teacher_outputs = None
with torch.no_grad():
teacher_outputs = teacher(inputs)
elif loss_criterion == 'CE':
targets = targets.to(opt.device)
for j in range(n):
if lr_schedule == 'linear':
schedulers[j].step()
optimizers[j].zero_grad()
#print(students[j])
student_outputs = students[j](inputs)
if loss_criterion == 'KD':
loss = criterion(student_outputs, teacher_outputs)
elif loss_criterion == 'CE':
loss = criterion(student_outputs, targets)
loss.backward()
optimizers[j].step()
loss_total[j] += loss.item()
batch_cnt += 1
for j in range(n):
opt.writer.add_scalar('step_%d/sample_%d_loss' % (opt.i, j),
loss_total[j] / batch_cnt, i)
acc = test_model(students[j], dataset)
#print("acc"+str(j)+": ", acc)
opt.writer.add_scalar('step_%d/sample_%d_acc' % (opt.i, j), acc, i)
if acc > accs_best[j]:
accs_best[j] = acc
students_best[j] = students[j].module
return students_best, accs_best
def train_model_search_reg(teacher_, students_, dataset,
optimization=opt.tr_se_optimization,
epochs=opt.tr_se_epochs, lr=opt.tr_se_lr,
momentum=opt.tr_se_momentum,
weight_decay=opt.tr_se_weight_decay,
lr_schedule=opt.tr_se_lr_schedule,
loss_criterion=opt.tr_se_loss_criterion):
n = len(students_)
loss_best = [sys.maxsize] * n
students_best = [None] * n
teacher = torch.nn.DataParallel(teacher_.to(opt.device))
students = [None] * n
for j in range(n):
students[j] = torch.nn.DataParallel(students_[j].to(opt.device))
if loss_criterion == 'KD' or loss_criterion == 'l2':
criterion = nn.MSELoss()
elif loss_criterion == 'CE':
criterion = nn.CrossEntropyLoss()
if optimization == 'SGD':
optimizers = [optim.SGD(students[j].parameters(), lr=lr,
momentum=momentum, weight_decay=weight_decay)
for j in range(n)]
elif optimization == 'Adam':
optimizers = [optim.Adam(students[j].parameters(), lr=lr,
weight_decay=weight_decay) for j in range(n)]
if lr_schedule == 'linear':
batch_cnt = len(dataset.train_loader)
n_total_exp = epochs * batch_cnt
lr_lambda = lambda n_exp_seen: 1 - n_exp_seen/n_total_exp
schedulers = [optim.lr_scheduler.LambdaLR(optimizers[j],
lr_lambda=lr_lambda)
for j in range(n)]
for i in range(1, epochs + 1):
print("epochs:",i)
teacher.eval()
for j in range(n):
students[j].train()
loss_total = [0.0] * n
batch_cnt = 0
for batch_idx, (inputs, targets) in enumerate(dataset.train_loader):
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
if loss_criterion == 'KD':
teacher_outputs = None
with torch.no_grad():
teacher_outputs = teacher(inputs)
elif loss_criterion == 'CE':
targets = targets.to(opt.device)
for j in range(n):
if lr_schedule == 'linear':
schedulers[j].step()
optimizers[j].zero_grad()
#print(students[j])
student_outputs = students[j](inputs)
if loss_criterion == 'KD':
loss1 = criterion(student_outputs, teacher_outputs)
loss2 = criterion(student_outputs, targets)
loss = loss1+loss2
elif loss_criterion == 'CE':
loss = criterion(student_outputs, targets)
loss.backward()
optimizers[j].step()
loss_total[j] += loss.item()
batch_cnt += 1
for j in range(n):
opt.writer.add_scalar('step_%d/sample_%d_loss' % (opt.i, j),
loss_total[j], i)
test_loss = test_model_regression(students[j], dataset)
#print("loss"+str(j)+": ", test_loss)
#opt.writer.add_scalar('step_%d/sample_%d_acc' % (opt.i, j), acc, i)
if test_loss < loss_best[j]:
loss_best[j] = test_loss
students_best[j] = students[j].module
return students_best, loss_best
| 2.5 | 2 |
tests/test_jsonutil.py | bhlarson/pymlutil | 0 | 12793665 | import sys
import os
import numpy as np
import cv2
import json
from collections import defaultdict
import unittest
import torch
sys.path.insert(0, os.path.abspath('')) # Test files from current path rather than installed module
from pymlutil.jsonutil import *
test_config = 'test.yaml'
class Test(unittest.TestCase):
def test_cmd(self):
result, _, _ = cmd('ls -la', check=True, timeout=5)
self.assertEqual(result, 0)
def test_yaml(self):
test = ReadDict(test_config)
assert test is not None
assert 'test_yaml' in test
self.assertEqual(test['test_yaml'][0]['zero'], 0)
self.assertEqual(test['test_yaml'][1]['one'], 1)
self.assertEqual(test['test_yaml'][2]['two'], 2)
if __name__ == '__main__':
unittest.main() | 2.546875 | 3 |
tests/test_proxy_digest_auth.py | untitaker/requests-toolbelt | 0 | 12793666 | # -*- coding: utf-8 -*-
"""Test proxy digest authentication
"""
import unittest
import requests
import requests_toolbelt
class TestProxyDigestAuth(unittest.TestCase):
def setUp(self):
self.username = "username"
self.password = "password"
self.auth = requests_toolbelt.auth.HTTPProxyDigestAuth(
self.username, self.password
)
self.auth.last_nonce = "bH3FVAAAAAAg74rL3X8AAI3CyBAAAAAA"
self.auth.chal = {
'nonce': self.auth.last_nonce,
'realm': '<EMAIL>',
'qop': 'auth'
}
self.prepared_request = requests.Request(
'GET',
'http://host.org/index.html'
).prepare()
def test_proxy_digest(self):
"""Test if it will generate Proxy-Authorization header
when nonce presents.
Digest authentication's correctness will not be tested here.
"""
# prepared_request headers should be clear before calling auth
assert not self.prepared_request.headers.get('Proxy-Authorization')
self.auth(self.prepared_request)
assert self.prepared_request.headers.get('Proxy-Authorization')
if __name__ == '__main__':
unittest.main()
| 2.9375 | 3 |
monotemplate.py | abderrahmen-hadjadj-aoul/monotemplate | 1 | 12793667 | """
"""
import sys
import re
import shutil
import json
if len(sys.argv) < 3:
print('Not enough input arguments')
exit()
################################################################################
# Options
comment = {'begin':'<!--', 'end':'-->'}
################################################################################
errors = []
in_file_path = sys.argv[1]
out_file_path = in_file_path
data_file_path = sys.argv[2]
if len(sys.argv) >= 4:
out_file_path = sys.argv[3]
else:
shutil.copyfile(out_file_path, out_file_path + '.tpl')
# Data
json1_file = open(data_file_path)
json1_str = json1_file.read()
data = json.loads(json1_str)
in_file = open(in_file_path)
in_lines = in_file.readlines()
out_lines = []
for in_line in in_lines:
if '<REPLACED>' in in_line or '<IGNORE>' in in_line or '<ERROR>' in in_line:
continue
# Find patterns
out_lines.append(in_line)
prog = re.compile(r'<REPLACE:([a-zA-Z0-9_]+)>')
key_list = prog.findall(in_line)
# Find
number_of_elem = 0
is_list = False
is_valid_list = False
for key in key_list:
if key in data and isinstance(data[key], list):
if is_list:
is_valid_list = is_valid_list and (len(data[key])==number_of_elem)
else:
number_of_elem = len(data[key])
is_valid_list = True
is_list = True
number_of_loop = number_of_elem
if number_of_loop == 0:
number_of_loop = 1
if is_list and not is_valid_list:
number_of_loop = 0
error = '<ERROR> Data list lengths are not consistent.'
errors.append(error)
out_lines.append(comment['begin'] + ' ' + error + comment['end'] + '\n')
for i in range(0,number_of_loop):
out_line = in_line
out_line = re.sub(r'^ *' + comment['begin'] + ' *(.*)' + comment['end'] + ' *', '\g<1>', out_line)
out_line = out_line.replace('\n', '')
for key in key_list:
if key in data:
if isinstance(data[key], list):
value = data[key][i]
else:
value = data[key]
out_line = out_line.replace('<REPLACE:' + key + '>', str(value))
else:
out_line = out_line.replace('<REPLACE:' + key + '>', '')
if len(key_list) > 0:
if key in data:
out_lines.append(out_line + ' ' + comment['begin'] + ' <REPLACED> ' + comment['end'] + '\n')
else:
error = '<ERROR> Key \'' + key + '\' does not exist.';
errors.append(error)
out_lines.append(comment['begin'] + ' ' + error + ' ' + comment['end'] + '\n')
out_file = open(out_file_path, 'w')
for out_line in out_lines:
out_file.write(out_line)
if len(errors) > 0:
print('\n***ERRORS***\n')
print(str(len(errors)) + ' errors in templating process:')
for error in errors:
print('\t' + error)
else:
print('No error in templating process')
| 2.859375 | 3 |
GameRoom/clienttest.py | gaogaotiantian/GameHost | 0 | 12793668 | <filename>GameRoom/clienttest.py
import socket
import time
if __name__ == "__main__":
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
conn = '/tmp/GameRoomConn'
sock.connect(conn)
time.sleep(1)
sock.send('hello world')
sock.close()
| 1.875 | 2 |
app/utils.py | mirsazzathossain/SPMS-Project | 0 | 12793669 | from django.db import connection
import numpy as np
def getstudentcoursewisePLO(studentID, courseID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as plonum,100*(sum(e.obtainedMarks)/sum(a.totalMarks)) as plopercent
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and r.student_id = '{}'
and co.course_id = '{}'
GROUP BY p.ploID
'''.format(studentID, courseID))
row = cursor.fetchall()
return row
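# Hedged usage sketch (illustrative only; the IDs and values are hypothetical): each helper
# in this module returns the raw cursor rows, e.g.
#   rows = getstudentcoursewisePLO('2021001', 'CSE101')
#   # -> [('PLO1', 72.5), ('PLO2', 64.0), ...]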
def getcoursewiseavgPLO(courseID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as plonum, avg(100*e.obtainedMarks/a.totalMarks)
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and co.course_id = '{}'
GROUP BY p.ploID
'''.format(courseID))
row = cursor.fetchall()
return row
def getcompletedcourses(studentID):
with connection.cursor() as cursor:
cursor.execute(
'''
SELECT distinct s.course_id
FROM app_registration_t r,
app_evaluation_t e,
app_section_t s
WHERE r.registrationID = e.registration_id
and r.section_id = s.sectionID
and r.student_id = '{}'
'''.format(studentID))
row = cursor.fetchall()
return row
def getcorrespondingstudentid(userID):
with connection.cursor() as cursor:
cursor.execute(
'''
SELECT studentID
FROM app_student_t s
WHERE s.user_ptr_id = '{}'
'''.format(userID))
row = cursor.fetchall()
return row
def getstudentprogramwisePLO(studentID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as plonum,100*(sum(e.obtainedMarks)/sum(a.totalMarks)) as plopercent
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_student_t s,
app_program_t pr
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and r.student_id = '{}'
and s.studentID = r.student_id
and s.program_id = pr.programID
GROUP BY p.ploID
'''.format(studentID))
row = cursor.fetchall()
return row
def getprogramwiseavgPLO(programID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as plonum, avg(100*e.obtainedMarks/a.totalMarks)
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.program_id = '{}'
GROUP BY p.ploID
'''.format(programID))
row = cursor.fetchall()
return row
def getstudentprogramid(studentID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT s.program_id
FROM app_student_t s
WHERE s.studentID = '{}'
'''.format(studentID))
row = cursor.fetchall()
return row
def getstudentallcoursePLO(studentID, category):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as ploNum,co.course_id,sum(e.obtainedMarks),sum(a.totalMarks), derived.Total
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
(
SELECT p.ploNum as ploNum,sum(a.totalMarks) as Total, r.student_id as StudentID
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and r.student_id = '{}'
GROUP BY r.student_id,p.ploID) derived
WHERE r.student_id = derived.StudentID
and e.registration_id = r.registrationID
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.ploNum = derived.ploNum
GROUP BY p.ploID,co.course_id
'''.format(studentID))
row = cursor.fetchall()
table = []
courses = []
for entry in row:
if entry[1] not in courses:
courses.append(entry[1])
courses.sort()
plo = ["PLO1", "PLO2", "PLO3", "PLO4", "PLO5", "PLO6", "PLO7", "PLO8", "PLO9", "PLO10", "PLO11", "PLO12"]
for i in courses:
temptable = []
if category == 'report':
temptable = [i]
for j in plo:
found = False
for k in row:
if j == k[0] and i == k[1]:
if category == 'report':
temptable.append(np.round(100 * k[2] / k[3], 2))
elif category == 'chart':
temptable.append(np.round(100 * k[2] / k[4], 2))
found = True
if not found:
if category == 'report':
temptable.append('N/A')
elif category == 'chart':
temptable.append(0)
table.append(temptable)
return plo, courses, table
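# Note (descriptive, not part of the original file): getstudentallcoursePLO pivots the
# query result into one row per completed course with twelve PLO columns; 'report' rows
# start with the course id and use 'N/A' for unassessed PLOs, while 'chart' rows use 0
# and normalize against the student's total marks per PLO across all courses.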
def getfacultycoursewisePLO(courseID, semesters):
    sem = ','.join('"{}"'.format(semester) for semester in semesters)
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.first_name, f.last_name, f.plonum, COUNT(*) as achieved_cnt
FROM
(
SELECT u.first_name, u.last_name, p.ploNum as plonum, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
accounts_user u,
app_employee_t emp
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and a.section_id = s.sectionID
and s.faculty_id IN
(
SELECT DISTINCT s.faculty_id
FROM app_section_t s
WHERE s.course_id = '{}'
)
and s.semester IN ({})
and s.course_id ='{}'
and s.faculty_id = emp.employeeID
and emp.user_ptr_id = u.id
)f
WHERE f.percentage >= 40
GROUP BY f.first_name, f.plonum;
'''.format(courseID, sem, courseID))
row1 = cursor.fetchall()
cursor.execute('''
SELECT COUNT(*)
FROM
(
SELECT u.first_name, u.last_name, p.ploNum as plonum, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
accounts_user u,
app_employee_t emp
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and a.section_id = s.sectionID
and s.faculty_id IN
(
SELECT DISTINCT s.faculty_id
FROM app_section_t s
WHERE s.course_id = '{}'
)
and s.semester IN ({})
and s.course_id ='{}'
and s.faculty_id = emp.employeeID
and emp.user_ptr_id = u.id
)f
GROUP BY f.first_name, f.plonum;
'''.format(courseID, sem, courseID))
row2 = cursor.fetchall()
faculty = []
plonum = []
plos1 = []
plos2 = []
for record in row1:
faculty.append(record[0]+' '+record[1])
plonum.append(record[2])
plos1.append(record[3])
for record in row2:
plos2.append(record[0])
plos = 100*(np.array(plos1)/np.array(plos2))
plos = plos.tolist()
faculty = list(set(faculty))
plonum = list(set(plonum))
plonum.sort()
plonum.sort(key=len, reverse=False)
plos = np.array(plos)
plos = np.split(plos, len(plos)/len(plonum))
new_plo=[]
for plo in plos:
new_plo.append(plo.tolist())
return faculty, plonum, new_plo
def getsemestercoursewisePLO(courseID, semesters):
    sem = ','.join('"{}"'.format(semester) for semester in semesters)
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.semester, f.plonum, COUNT(*) as achieved_cnt
FROM
(
SELECT s.semester, p.ploNum as plonum, s.course_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and a.section_id = s.sectionID
and s.semester IN ({})
and co.course_id ='{}'
and s.course_id = co.course_id
)f
WHERE f.percentage >= 40
GROUP BY f.semester, f.plonum;
'''.format(sem, courseID))
row1 = cursor.fetchall()
cursor.execute('''
SELECT COUNT(*) as all_cnt
FROM
(
SELECT s.semester, p.ploNum as plonum, s.course_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and a.section_id = s.sectionID
and s.semester IN ({})
and co.course_id ='{}'
and s.course_id = co.course_id
)f
GROUP BY f.semester, f.plonum;
'''.format(sem, courseID))
row2 = cursor.fetchall()
semester = []
plonum = []
acheived = []
all_cnt = []
for record in row1:
semester.append(record[0])
plonum.append(record[1])
acheived.append(record[2])
for record in row2:
all_cnt.append(record[0])
acheived_per = 100*(np.array(acheived)/np.array(all_cnt))
semester = list(set(semester))
plonum = list(set(plonum))
failed_per = 100 - acheived_per
acheived_per = np.split(acheived_per, len(acheived_per)/len(semester))
failed_per = np.split(failed_per, len(failed_per)/len(semester))
acheived=[]
for plo in acheived_per:
acheived.append(plo.tolist())
failed=[]
for plo in failed_per:
failed.append(plo.tolist())
return semester, plonum, acheived, failed
def getplowisecoursecomparism(plos, semesters):
    sem = ','.join('"{}"'.format(semester) for semester in semesters)
    ploo = ','.join('"{}"'.format(plo) for plo in plos)
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.course_id, f.ploNum, COUNT(*)
FROM
(
SELECT s.course_id, p.ploNum, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.ploNum in ({})
and a.section_id = s.sectionID
and s.semester IN ({})
)f
WHERE f.percentage >= 40
GROUP BY f.ploNum, f.course_id;
'''.format(ploo, sem))
row1 = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('''
SELECT COUNT(*)
FROM
(
SELECT s.course_id, p.ploNum, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.ploNum in ({})
and a.section_id = s.sectionID
and s.semester IN ({})
)f
GROUP BY f.ploNum, f.course_id;
'''.format(ploo, sem))
row2 = cursor.fetchall()
courses = []
plonum = []
acheived = []
all_cnt = []
for record in row1:
courses.append(record[0])
plonum.append(record[1])
acheived.append(record[2])
for record in row2:
all_cnt.append(record[0])
acheived_per = 100*(np.array(acheived)/np.array(all_cnt))
courses = list(set(courses))
plonum = list(set(plonum))
acheived_per = np.split(acheived_per, len(acheived_per)/len(plonum))
acheived=[]
for plo in acheived_per:
acheived.append(plo.tolist())
return courses, plonum, acheived
def getprogramsemesterwiseplocount(program, semesters):
    sem = ','.join('"{}"'.format(semester) for semester in semesters)
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.plonum, COUNT(*)
FROM
(
SELECT p.ploNum as plonum, r.student_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
app_program_t prog
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.program_id = prog.programID
and prog.programName = '{}'
and a.section_id = s.sectionID
and s.semester IN ({})
)f
WHERE f.percentage>=40
GROUP BY f.plonum;
'''.format(program, sem))
row1 = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('''
SELECT COUNT(*)
FROM
(
SELECT p.ploNum as plonum, r.student_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
app_program_t prog
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.program_id = prog.programID
and prog.programName = '{}'
and a.section_id = s.sectionID
and s.semester IN ({})
)f
GROUP BY f.plonum;
'''.format(program, sem))
row2 = cursor.fetchall()
plonum = []
acheived = []
attempted = []
for record in row1:
plonum.append(record[0])
acheived.append(record[1])
for record in row2:
attempted.append(record[0])
plonum = list(set(plonum))
acheived = np.array(acheived)
attempted = np.array(attempted)
new_acheived=[]
for plo in acheived:
new_acheived.append(plo.tolist())
new_attempted=[]
for plo in attempted:
new_attempted.append(plo.tolist())
plonum.sort()
plonum.sort(key=len, reverse=False)
return plonum, new_acheived, new_attempted
def getprogramwiseploandcourses(program, semesters):
    sem = ','.join('"{}"'.format(semester) for semester in semesters)
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.ploNum, f.course_id, COUNT(*)
FROM
(
SELECT p.ploNum as plonum, s.course_id, r.student_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
app_program_t prog
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.program_id = prog.programID
and prog.programName = '{}'
and a.section_id = s.sectionID
and s.semester IN ({})
)f
WHERE f.percentage>=40
GROUP BY f.ploNum, f.course_id
'''.format(program, sem))
row = cursor.fetchall()
plonum = []
courses = []
counts = []
for record in row:
plonum.append(record[0])
courses.append(record[1])
plonum = list(set(plonum))
plonum.sort()
plonum.sort(key=len, reverse=False)
courses = list(set(courses))
courses.sort()
table = np.zeros((len(courses), len(plonum)))
for record in row:
table[courses.index(record[1])][plonum.index(record[0])] += record[2]
table = table.tolist()
return plonum, courses, table | 2.28125 | 2 |
content_upload/get_marketo_email.py | tyron-pretorius/zapier | 0 | 12793670 |
#input variables to Zapier Step 12: Get Marketo Email ID
input={
'token': 'Token' #from Step 3: Get Marketo Access Token
'pid': 'Pid' #from Step 9: Clone Latest Marketo Program
'c_type': 'Content Type' #from Step 1: Get Submission from Google Form
}
import requests
import datetime
import re
if (input['c_type'] != 'Fact Sheet' and input['c_type'] != 'Infographic'):
pid = input['pid']
#https://developers.marketo.com/rest-api/assets/emails/#browse
url = 'https://###-xxx-###.mktorest.com/rest/asset/v1/emails.json?folder={\"id\":'+pid+',\"type\":\"Program\"}'
payload = {}
headers = {
'Authorization': 'Bearer ' + input['token']
}
response = requests.request("GET", url, headers=headers, data = payload)
raw=response.text
eid = re.search('{"id":(\d*),',response.text).group(1)
return {'email_id': eid}
| 2.59375 | 3 |
tests/test_logger.py | nimbinatus/python | 0 | 12793671 | <filename>tests/test_logger.py
import json
import unittest
from http.server import BaseHTTPRequestHandler
from logdna import LogDNAHandler
from .mock.server import get_port, start_server
from .mock.log import logger, info, LOGDNA_API_KEY
expectedLines = []
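# Lines received by the mock LogDNA ingestion endpoint; SuccessfulRequestHandler
# appends to this list and the tests assert on it.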
class SuccessfulRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
self.send_response(200)
self.end_headers()
body = json.loads(body)['ls']
for keys in body:
expectedLines.append(keys['line'])
class FailedRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
content_length = int(self.headers['Content-Length'])
self.rfile.read(content_length)
self.send_response(400)
self.end_headers()
class LogDNAHandlerTest(unittest.TestCase):
    def server_receives_messages(self):
port = get_port()
options = {
'hostname': 'localhost',
'url': 'http://localhost:{0}'.format(port),
'ip': '10.0.1.1',
'mac': 'C0:FF:EE:C0:FF:EE'
}
handler = LogDNAHandler(LOGDNA_API_KEY, options)
logger.addHandler(handler)
line = "python python python"
server_thread = start_server(port, SuccessfulRequestHandler)
logdna_thread = info(line)
server_thread.join()
logdna_thread.join()
self.assertEqual(len(expectedLines), 1)
self.assertIn(line, expectedLines)
logger.removeHandler(handler)
def messages_preserved_if_excp(self):
port = get_port()
options = {
'hostname': 'localhost',
'url': 'http://localhost:{0}'.format(port),
'ip': '10.0.1.1',
'mac': 'C0:FF:EE:C0:FF:EE'
}
handler = LogDNAHandler(LOGDNA_API_KEY, options)
logger.addHandler(handler)
line = "second test. server fails"
server_thread = start_server(port, FailedRequestHandler)
logdna_thread = info(line)
server_thread.join()
logdna_thread.join()
self.assertEqual(len(handler.buf), 1)
logger.removeHandler(handler)
def stops_retention_when_buf_is_full(self):
port = get_port()
options = {
'hostname': 'localhost',
'url': 'http://localhost:{0}'.format(port),
'ip': '10.0.1.1',
'mac': 'C0:FF:EE:C0:FF:EE',
'buf_retention_limit': 50,
            'request_timeout': 10,
'flush_interval': 1,
'retry_interval_secs': 1
}
handler = LogDNAHandler(LOGDNA_API_KEY, options)
logger.addHandler(handler)
line = "when buffer grows bigger than we want"
lineTwo = "when buffer grows bigger than we want. And more and more"
server_thread = start_server(port, FailedRequestHandler)
logdna_thread = info(line, lineTwo)
server_thread.join()
logdna_thread.join()
self.assertEqual(len(handler.buf), 1)
self.assertNotEqual(handler.buf[0]['line'], lineTwo)
logger.removeHandler(handler)
def test_run_tests(self):
        self.server_receives_messages()
self.messages_preserved_if_excp()
self.stops_retention_when_buf_is_full()
if __name__ == '__main__':
unittest.main()
| 2.6875 | 3 |
backend/src/pox/ext/gini/samples/packet_loss.py | anrl/gini4 | 11 | 12793672 | <gh_stars>10-100
#!/usr/bin/python2
"""
packet_loss.py: Simulates packet loss by dropping each incoming packet with a
probability of 25% and flooding the rest.
"""
import random
from pox.core import core
import pox.openflow.libopenflow_01 as of
def packet_in(event):
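    # Forward (flood) the packet only when the random draw clears the 25% drop
    # threshold; otherwise the packet is silently dropped.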
if random.random() >= 0.25:
msg = of.ofp_packet_out(data = event.ofp)
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
event.connection.send(msg)
def launch():
core.openflow.addListenerByName("PacketIn", packet_in)
| 2.3125 | 2 |
ampa/cole/migrations/0023_entitat_codi_registre.py | jordiprats/django-ampa | 0 | 12793673 | <gh_stars>0
# Generated by Django 3.1.5 on 2021-03-06 17:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cole', '0022_auto_20210306_1528'),
]
operations = [
migrations.AddField(
model_name='entitat',
name='codi_registre',
field=models.CharField(blank=True, default='', max_length=256, null=True),
),
]
| 1.460938 | 1 |
apps/dashboard/migrations/0009_generalsettings_location.py | iamjdcollins/districtwebsite | 0 | 12793674 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-17 15:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('taxonomy', '0009_boardprecinct_precinct_map'),
('dashboard', '0008_generalsettings_gatrackingid'),
]
operations = [
migrations.AddField(
model_name='generalsettings',
name='location',
field=models.ForeignKey(blank=True, help_text='<span>Select the primary location for the intity this site represents. This list is managed by the webmaster.</span>', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='dashboard_generalsettings_location', to='taxonomy.Location', verbose_name='Primary Location'),
),
]
| 1.65625 | 2 |
unittests/constants.py | ga4gh/gsoc2018-ref-retrieval-api | 6 | 12793675 | GOOD_SERVER_URL = "http://0.0.0.0:8989/"
BAD_SERVER_URL = "http://0.0.0.0:8988/"
JSON_REPORT = "test_report.json"
WEB_FILE_PATH = "test_web"
DEFAULT_WEB_DIR = "web" | 1.289063 | 1 |
openerp/addons/base_report_designer/plugin/openerp_report_designer/bin/script/NewReport.py | ntiufalara/openerp7 | 3 | 12793676 | #########################################################################
#
# Copyright (c) 2003-2004 <NAME> <EMAIL>
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
from lib.logreport import *
from LoginTest import *
from lib.rpc import *
database="test"
uid = 3
#
#
#
# Start OpenOffice.org, listen for connections and open testing document
#
#
class NewReport(unohelper.Base, XJobExecutor):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
self.logobj=Logger()
if not loginstatus and __name__=="package":
exit(1)
self.win=DBModalDialog(60, 50, 180, 115, "Open New Report")
self.win.addFixedText("lblModuleSelection", 2, 2, 60, 15, "Module Selection")
self.win.addComboListBox("lstModule", -2,13,176,80 , False)
self.lstModule = self.win.getControl( "lstModule" )
self.aModuleName=[]
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
global passwd
self.password = <PASSWORD>
global url
self.sock=RPCSession(url)
ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search',[])
fields = [ 'model','name']
res = self.sock.execute(database, uid, self.password, 'ir.model' , 'read', ids, fields)
res.sort(lambda x, y: cmp(x['name'],y['name']))
for i in range(len(res)):
self.lstModule.addItem(res[i]['name'],self.lstModule.getItemCount())
self.aModuleName.append(res[i]['model'])
self.win.addButton('btnOK',-2 ,-5, 70,15,'Use Module in Report' ,actionListenerProc = self.btnOk_clicked )
self.win.addButton('btnCancel',-2 - 70 - 5 ,-5, 35,15,'Cancel' ,actionListenerProc = self.btnCancel_clicked )
self.win.doModalDialog("",None)
def btnOk_clicked(self, oActionEvent):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
docinfo.setUserFieldValue(3,self.aModuleName[self.lstModule.getSelectedItemPos()])
self.logobj.log_write('Module Name',LOG_INFO, ':Module use in creating a report %s using database %s' % (self.aModuleName[self.lstModule.getSelectedItemPos()], database))
self.win.endExecute()
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
NewReport(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( \
NewReport,
"org.openoffice.openerp.report.opennewreport",
("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 1.40625 | 1 |
codes/pandas/pandas-1-loading-and-displaying-data.py | sandywadhwa/machine-learning-in-python | 0 | 12793677 | <filename>codes/pandas/pandas-1-loading-and-displaying-data.py<gh_stars>0
import pandas
import numpy
import matplotlib
# READ CSV FILE
df = pandas.read_csv("../../data/uci-pima-indian-diabetes-data-original.csv")
# Print Dimensions of Data (Rows, Cols)
print("Dimensions of Data", df.shape)
# Print First n Rows
n = 5
print(df.head(n))
# Print Last n Rows
print(df.tail(n))
# Get One Particular Cell Value [col][row]
print(df["age"][1]) | 3.875 | 4 |
csrf/luhack_csrf/site.py | nitros12/luhack-web-lab | 0 | 12793678 | <reponame>nitros12/luhack-web-lab<gh_stars>0
import os
from collections import defaultdict
from pathlib import Path
from starlette.exceptions import HTTPException
import aiohttp
from dotenv import load_dotenv
from furl import furl
from starlette.applications import Starlette
from starlette.endpoints import HTTPEndpoint
from starlette.middleware.sessions import SessionMiddleware
from starlette.requests import HTTPConnection
from starlette.responses import RedirectResponse
from starlette.routing import Mount, Router
from wtforms import Form, StringField, IntegerField
from luhack_csrf.templater import templates
load_dotenv()
flag = os.getenv("CSRF_FLAG")
root_dir = Path(__file__).parent
router = Router()
class ClickForm(Form):
link = StringField("link")
@router.route("/")
async def index(request: HTTPConnection):
return templates.TemplateResponse("main.j2", {"request": request})
def redirect_response(*args, **kwargs):
kwargs.setdefault("status_code", 303)
return RedirectResponse(*args, **kwargs)
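# "Kerry" plays the CSRF victim: she fetches any link submitted through the form
# while carrying a username cookie containing the flag, so a forged /bank/transfer
# URL fires with her authenticated session.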
@router.route("/kerry")
class Kerry(HTTPEndpoint):
async def get(self, request: HTTPConnection):
form = ClickForm()
return templates.TemplateResponse(
"kerry.j2", {"request": request, "form": form}
)
async def post(self, request: HTTPConnection):
form = await request.form()
form = ClickForm(form)
link = furl(form.link.data)
if link.path.segments and link.path.segments[0] == "csrf":
link.netloc = "csrf:8080"
cookies = {"username": flag}
async with aiohttp.ClientSession(cookies=cookies) as session:
async with session.get(link.url) as response:
print(response.status)
return templates.TemplateResponse(
"kerry.j2", {"request": request, "form": form}
)
transactions = defaultdict(list)
class LoginForm(Form):
user = StringField("Username")
class TransferForm(Form):
dest = StringField("Destination")
amount = IntegerField("Amount")
@router.route("/bank")
async def bank(request: HTTPConnection):
login_form = LoginForm()
transfer_form = TransferForm()
current_user = request.cookies.get("username")
return templates.TemplateResponse(
"bank.j2",
{
"request": request,
"login_form": login_form,
"transfer_form": transfer_form,
"login_url": request.url_for("bank_login"),
"transfer_url": request.url_for("bank_transfer"),
"current_user": current_user,
"transactions": [] if current_user is None else transactions[current_user],
},
)
@router.route("/bank/login", methods=["POST"])
async def bank_login(request: HTTPConnection):
form = await request.form()
form = LoginForm(form)
r = redirect_response(url=request.url_for("bank"))
r.set_cookie("username", form.user.data)
return r
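# Transfers are authorised purely by the username cookie and are accepted via GET
# query parameters with no CSRF token -- the intentional vulnerability in this lab.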
@router.route("/bank/transfer", methods=["GET", "POST"])
async def bank_transfer(request: HTTPConnection):
form = TransferForm(request.query_params)
if not form.validate():
login_form = LoginForm()
current_user = request.cookies.get("username")
return templates.TemplateResponse(
"bank.j2",
{
"request": request,
"login_form": login_form,
"transfer_form": form,
"login_url": request.url_for("bank_login"),
"transfer_url": request.url_for("bank_transfer"),
"current_user": current_user,
"transactions": [] if current_user is None else transactions[current_user],
},
)
from_ = request.cookies.get("username")
if from_ is None:
raise HTTPException(status_code=401, detail="Not logged in")
transaction = (from_, form.dest.data, form.amount.data)
print(transaction)
transactions[form.dest.data].append(transaction)
transactions[from_].append(transaction)
return redirect_response(url=request.url_for("bank"))
app = Starlette(routes=[
Mount("/csrf", app=router)
])
app.add_middleware(SessionMiddleware, secret_key="doesntmatter")
| 2.234375 | 2 |
pyaugmecon/__init__.py | vishalbelsare/pyaugmecon | 5 | 12793679 | from pyaugmecon.pyaugmecon import PyAugmecon
| 1.054688 | 1 |
feps/epitopeenergies.py | immunityproject/free-energy-post-substitution | 0 | 12793680 | <reponame>immunityproject/free-energy-post-substitution<gh_stars>0
"""
epitopeenergies.py - per-epitope energy and entropy calculations for the feps tool
"""
from __future__ import print_function
import click
import csv
import sys
from collections import defaultdict
from feps.energy import load_db,combine_energy_mutations,codes,add_entropies
from feps.entropy import mean
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def get_epitope_energies(energies):
"""Create a map of epitopes -> epitopeinfo, [wt -> energies]
Users can then iterate via the peptide chain to grab the energies in order
This includes the structural energie, which is the mean of the
site entropies
"""
epitopedb = defaultdict(dict)
for _,entry in energies.items():
# Keys for the epitope db are unique to the epitope, so:
# name, peptide, startsite, endsite
name = entry['epitope']
peptide = entry['peptide']
startsite = entry['start']
endsite = entry['end']
site = entry['site']
chain = entry['chains']
if not name:
eprint('Skipping site {} as it does not match an epitope'.format(
site))
continue
key = '{},{},{},{},{}'.format(name, peptide, startsite, endsite, chain)
epitopedb[key]['epitope'] = name
epitopedb[key]['peptide'] = peptide
epitopedb[key]['peptide_status'] = entry['peptide_status']
epitopedb[key]['startsite'] = int(startsite)
epitopedb[key]['endsite'] = int(endsite)
epitopedb[key]['protein'] = entry['protein']
epitopedb[key]['chains'] = chain
wt = entry['wt']
peptide_state = list(epitopedb[key].get('peptide_state',
("-" * len(peptide))))
idx = int(site)-int(startsite)
if peptide_state[idx] != '-':
eprint('Overwriting peptide state {} at idx {}!'.format(
''.join(peptide_state), idx))
if peptide[idx] != wt:
eprint('Peptide mismatch at {}: {} expected, got {}'.format(
idx, peptide[idx], wt))
peptide_state[idx] = wt
epitopedb[key]['peptide_state'] = ''.join(peptide_state)
# Average energy for the wt at this peptide index
wtk = '{},{}'.format(idx,wt)
epitopedb[key][wtk] = mean([entry[x]
for x in codes.values()
if x in entry])
epitopedb[key][wtk + '-entropy'] = entry['shannon_entropy']
epitopedb[key][wtk + '-absolute-entropy'] = (
entry['absolute_shannon_entropy'])
epitopedb[key][wtk + '-boltzman-entropy'] = (
entry['boltzman_shannon_entropy'])
epitopedb[key][wtk + '-boltzman-absolute-entropy'] = (
entry['absolute_boltzman_shannon_entropy'])
# Now average energy and structural entropy
for _,v in epitopedb.items():
ps = v['peptide_state']
keys = ['{},{}'.format(i,ps[i]) for i in range(len(ps))
if ps[i] != '-']
v['structural_entropy'] = mean([v[k + '-entropy'] for k in keys])
v['absolute_structural_entropy'] = mean([v[k + '-absolute-entropy']
for k in keys])
v['boltzman_structural_entropy'] = mean([v[k + '-boltzman-entropy']
for k in keys])
v['boltzman_absolute_structural_entropy'] = mean(
[v[k + '-boltzman-absolute-entropy'] for k in keys])
v['average_energy'] = mean([v[k] for k in keys])
return epitopedb
@click.command()
@click.option('--database',
default='https://epitopedata.flowpharma.com/EpitopeData.json',
help='URL to a jsonl encoded file to dump')
@click.option('--ignore-mutation', default=[], multiple=True,
type=click.Choice(codes.values()),
help='Ignore these mutations')
def epitope_energies(database, ignore_mutation):
db = load_db(database)
amino_codes = [aa for aa in codes.values() if aa not in ignore_mutation]
energies = combine_energy_mutations(db, amino_codes)
energies = add_entropies(energies, amino_codes,
include_absolute_entropy = True,
include_boltzman_entropy= True,
include_absolute_boltzman_entropy= True)
epitope_energies = get_epitope_energies(energies)
sorted_keys = sorted(epitope_energies.keys(),
key=lambda x: int(x.split(',')[2]))
fieldnames = [ 'protein', 'epitope', 'peptide', 'peptide_status',
'peptide_state',
'startsite', 'endsite', 'chains', 'average_energy',
'structural_entropy', 'absolute_structural_entropy',
'boltzman_structural_entropy',
'boltzman_absolute_structural_entropy']
writer = None
for k in sorted_keys:
v = epitope_energies[k]
# Print header
if not writer:
writer = csv.DictWriter(sys.stdout, fieldnames=fieldnames,
extrasaction='ignore')
writer.writeheader()
writer.writerow(v)
if __name__ == '__main__':
epitope_energies()
| 2.640625 | 3 |
PWN/2018WangDing/2018WangDing-3/pwn3-note2/exp3.py | Cossack9989/SEC_LEARNING | 13 | 12793681 | <filename>PWN/2018WangDing/2018WangDing-3/pwn3-note2/exp3.py<gh_stars>10-100
# Exploit verified locally
# After two large chunks are freed and consolidated, the first chunk's size field ends up equal to the sum of both chunks
# That oversized chunk lets us overwrite the adjacent allocation, which leads to a use-after-free
# The UAF combined with a fake fd pointer then gives an arbitrary memory write
# Meanwhile, the format string bug leaks the libc base, a return address and the stack canary
from pwn import *
from binascii import *
r=process('./deathnote2')
r.send('\n')
#r.sendlineafter('tell me your name:','%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p')
r.sendlineafter('tell me your name:','%p%p%p%p%p%p%p%p%p%p%p%p%p\x00\x00\x00\x00\x00')
log.success('fsb leak')
r.recvuntil('0x70250x')
leak0=int(r.recv(12),16)
retaddr=leak0-0xd8
log.success('ReturnAddr='+str(hex(retaddr)))
r.recvuntil('0x')
leak1=int(r.recv(16),16)
canary=leak1
log.success('Canary='+str(hex(canary)))
r.recvuntil('(nil)0x')
leak2=int(r.recv(12),16)
libc_base=leak2-0x20830
og1=libc_base+0x45216
og2=libc_base+0x4526a
og3=libc_base+0xf02a4
og4=libc_base+0xf1147
log.success('LibcBase='+str(hex(libc_base)))
log.success('OneGadget='+str(hex(og1)))
r.recvuntil('Invalid!\n')
def WriteName(size,content):
r.sendlineafter('choice>>','1')
r.sendlineafter('Size:',str(size))
r.sendlineafter('Name:',content)
def ShowName(index):
r.sendlineafter('choice>>','3')
r.sendlineafter('Page:',str(index))
def DeleteName(index):
r.sendlineafter('choice>>','2')
r.sendlineafter('Page:',str(index))
def Bye():
r.sendlineafter('choice>>','4')
fake_fd=retaddr-0x1f
payload=p64(canary>>8)+p64((og1&0xff)<<56)+p64((0x01<<56)+(og1>>8))
WriteName(32,'00000000')#0
WriteName(192,'11111111')#1
WriteName(192,'22222222')#2
WriteName(32,'33333333')#3 avoid combination
DeleteName(2)
DeleteName(1)
WriteName(0x1a0,'1'*0xd0+p64(0xe0)+p64(fake_fd))#_1
WriteName(0xc0,'22222222')#_2
WriteName(0x50,payload)#4
Bye()
r.interactive() | 2 | 2 |
unittests/one_clock/gen.py | SubjeBilisim/anasymod | 20 | 12793682 | from pathlib import Path
from argparse import ArgumentParser
from msdsl import MixedSignalModel, VerilogGenerator
from msdsl.expr.extras import if_
def main():
print('Running model generator...')
# parse command line arguments
parser = ArgumentParser()
parser.add_argument('-o', '--output', type=str, default='build')
parser.add_argument('--dt', type=float, default=0.1e-6)
a = parser.parse_args()
# create the model
m = MixedSignalModel('osc', dt=a.dt)
m.add_digital_input('emu_clk')
m.add_digital_input('emu_rst')
m.add_digital_output('dt_req', 32)
m.add_digital_input('emu_dt', 32)
m.add_digital_output('clk_val')
m.add_digital_input('t_lo', 32)
m.add_digital_input('t_hi', 32)
# determine if the request was granted
m.bind_name('req_grant', m.dt_req == m.emu_dt)
# update the clock value
m.add_digital_state('prev_clk_val')
m.set_next_cycle(m.prev_clk_val, m.clk_val, clk=m.emu_clk, rst=m.emu_rst)
m.set_this_cycle(m.clk_val, if_(m.req_grant, ~m.prev_clk_val, m.prev_clk_val))
# determine the next period
m.bind_name('dt_req_next', if_(m.prev_clk_val, m.t_lo, m.t_hi))
# increment the time request
m.bind_name('dt_req_incr', m.dt_req - m.emu_dt)
# determine the next period
m.bind_name('dt_req_imm', if_(m.req_grant, m.dt_req_next, m.dt_req_incr))
m.set_next_cycle(m.dt_req, m.dt_req_imm, clk=m.emu_clk, rst=m.emu_rst, check_format=False)
# determine the output filename
filename = Path(a.output).resolve() / f'{m.module_name}.sv'
print(f'Model will be written to: {filename}')
# generate the model
m.compile_to_file(VerilogGenerator(), filename)
if __name__ == '__main__':
main()
| 2.4375 | 2 |
external/opensocdebug/software/src/bindings/python/examples/runelf.py | koenenwmn/optimsoc | 0 | 12793683 | <filename>external/opensocdebug/software/src/bindings/python/examples/runelf.py
import opensocdebug
import sys
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] <elffile>")
parser.add_option("--verify-memload",
action="store_true", dest="verify", default=False,
help="verify loaded memory")
parser.add_option("--no-ctm",
action="store_false", dest="ctm", default=True,
help="don't use CTM")
parser.add_option("--no-stm",
action="store_false", dest="stm", default=True,
help="don't use STM")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("missing ELF file")
elffile = args[0]
osd = opensocdebug.Session()
osd.reset(halt=True)
if options.stm:
for m in osd.get_modules("STM"):
m.log("stm{:03d}.log".format(m.get_id()))
if options.ctm:
for m in osd.get_modules("CTM"):
m.log("ctm{:03d}.log".format(m.get_id()), elffile)
for m in osd.get_modules("MAM"):
m.loadelf(elffile, options.verify)
osd.start()
osd.wait(120)
| 2.375 | 2 |
backend/pints/utils.py | ebunt/pulse | 63 | 12793684 | <reponame>ebunt/pulse<gh_stars>10-100
import os
from cryptography.fernet import Fernet
FERNET_KEY = os.environ.get('PAPER_FERNET_KEY')
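# PAPER_FERNET_KEY must hold a URL-safe base64-encoded 32-byte key (e.g. the output
# of Fernet.generate_key()); Fernet() raises if the key is missing or malformed.
# Example round trip: decrypt(encrypt("secret")) == "secret"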
def encrypt(e):
f = Fernet(FERNET_KEY)
e = e.encode("utf-8")
return f.encrypt(e).decode()
def decrypt(d):
f = Fernet(FERNET_KEY)
d = f.decrypt(d.encode("utf-8"))
return d.decode() | 2.46875 | 2 |
main.py | Mike-n-ike/deep-learning-for-pkd-patients | 0 | 12793685 | <gh_stars>0
# This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
# import required libraries
import itertools
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pydicom
def load_data(master_path, new_dimensions):
'''
This function is used to transform dicom images into numpy arrays with the desired dimensions
Parameters:
master_path (str): the path to the directory containing the scans of all patients
new_dimensions (list): a list containing the dimensions of the new scan [z,x,y]
Returns:
patients_resized (numpy.ndarray): a numpy array containing numpy arrays representing MRI scans of individual patients
'''
master_path = master_path + '/MR/'
patients_name = os.listdir(master_path)
patients_dcm = [] # contains the samples in dicom format
for patient in patients_name:
if patient == '.DS_Store':
continue
path = master_path + patient + '/T2SPIR/DICOM_anon'
patients_dcm.append(load_scan(path))
print("Dicom files loaded")
scans_not_square = not_square(patients_dcm)
[min_length, max_length], [min_width, max_width], [min_height, max_height] = get_extreme_dim(patients_dcm)
patients = []
for patient in patients_dcm:
patients.append(get_array(patient))
print("Dicom files converted to numpy array")
patients_resized = []
for patient in patients:
patients_resized.append(resize_data(patient, new_dimensions))
print("Scans have been resized to {size}".format(size=patients_resized[0].shape))
return patients_resized
def load_scan(path):
"""
    This function is used to load the MRI scan of a single patient.
    Parameters:
        path (str): The path to the folder containing the MRI scans of the patient
    Returns:
        slices (list): A list of the slices that make up the MRI scan
"""
# for s in os.listdir(path):
# if s == '.DS_Store/':
# print("found ds")
# continue
# else:
# slices = [pydicom.read_file(path + '/' + s)]
slices = [pydicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
except Exception:
print("error")
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
def get_dimensions(patients_dcm):
"""
This function is used to get the dimensions of all scans
Parameters:
patients_dcm (list): A list containing the MRI scans. Each MRI scan is a list of the slices that make up the scan
Returns:
None
"""
for i in range(len(patients_dcm)):
print(patients_dcm[i][0].pixel_array.shape, len(patients_dcm[i]))
def not_square(patients_dcm):
"""
This function tells us if all the slices that make up a scan are squares.
Parameters:
patients_dcm (list): A list containing the MRI scans. Each MRI scan is a list of the slices that make up the scan
Returns:
scans_not_square (list): A list containing the indices of scans that contain non-square slices
"""
scans_not_square = []
for i in range(len(patients_dcm)):
# we compare only the first slice because all of them in one scan have the same dimension
if patients_dcm[i][0].pixel_array.shape[0] != patients_dcm[i][0].pixel_array.shape[1]:
scans_not_square.append(i)
print("Not all images are squares")
return scans_not_square
def get_array(scan):
'''
This function converts a scan into a numpy array
Parameters:
scan (list): A list containing the slices that make up an MRI scan
Returns:
np_image (numpy.ndarray): A numpy array representing the MRI scan
'''
image = np.stack([s.pixel_array for s in scan])
image = image.astype(np.int16)
np_image = np.array(image, dtype=np.int16)
return np_image
def resize_data(data, new_dimensions):
'''
This function resizes a numpy array.
TO DO: method used for interpolation?
Parameters:
data (numpy.ndarray): a numpy array representing an MRI scan
new_dimensions (list): a list containing the dimensions of the new scan [z,x,y]
Returns:
new_data (numpy.ndarray): a numpy array with the desired dimensions
'''
initial_size_x = data.shape[1]
initial_size_y = data.shape[2]
initial_size_z = data.shape[0]
new_size_z = new_dimensions[0]
new_size_x = new_dimensions[1]
new_size_y = new_dimensions[2]
delta_x = initial_size_x / new_size_x
delta_y = initial_size_y / new_size_y
delta_z = initial_size_z / new_size_z
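    # Nearest-neighbour resampling: each output voxel copies the input voxel found
    # at the scaled (floored) source index.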
new_data = np.zeros((new_size_z, new_size_x, new_size_y))
for x, y, z in itertools.product(range(new_size_x),
range(new_size_y),
range(new_size_z)):
new_data[z][x][y] = data[int(z * delta_z)][int(x * delta_x)][int(y * delta_y)]
return new_data
def get_extreme_dim(patients_dcm):
"""
This function gets the minimum and maximum dimensions of all scans
    Parameters:
patients_dcm (list): A list containing the MRI scans. Each MRI scan is a list of the slices that make up the scan
Returns:
[min_length, max_length], [min_width, max_width], [min_height, max_height] (list): These lists contain the
minimum and maximum dimensions of all scans.
"""
lengths = []
widths = []
heights = []
square = True
scans_not_square = not_square(patients_dcm)
if len(scans_not_square) != 0:
square = False
# for i in range(len(patients_dcm)):
# if patients_dcm[i][0].pixel_array.shape[0] != patients_dcm[i][0].pixel_array.shape[1]:
# square = False
# print("Not all images are squares")
for i in range(len(patients_dcm)):
lengths.append(patients_dcm[i][0].pixel_array.shape[0])
heights.append(len(patients_dcm[i]))
if square == False:
widths.append(patients_dcm[i][0].pixel_array.shape[1])
max_length = max(lengths)
min_length = min(lengths)
max_height = max(heights)
min_height = min(heights)
print("Min length: {min_l} \nMax length: {max_l}\n".format(max_l=max_length, min_l=min_length))
if square == False:
max_width = max(widths)
min_width = min(widths)
print("Min width: {min_w} \nMax width: {max_w}\n".format(max_w=max_width, min_w=min_width))
else:
min_width, max_width = -1, -1
print("All images are squares\n")
print("Min height: {min_h} \nMax height: {max_h}\n".format(max_h=max_height, min_h=min_height))
return [min_length, max_length], [min_width, max_width], [min_height, max_height]
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
my_dir = os.getcwd()
pat = load_data(my_dir, [20, 100, 100])
plt.imshow(pat[0][10, :, :])
plt.savefig('mri_scan.png')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| 3 | 3 |
Data Structure/Sort/Quick Sort/main.py | InnoFang/oh-my-algorithms | 19 | 12793686 | import random
def partition(arr, left, right):
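    # Pivot is the first element (tmp); l and r sweep towards each other swapping
    # misplaced pairs, and the pivot is finally swapped into its sorted position l.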
l, r, tmp = left, right, arr[left]
while l != r:
while l < r and arr[r] >= tmp: r -= 1
while l < r and arr[l] <= tmp: l += 1
arr[l], arr[r] = arr[r], arr[l]
arr[l], arr[left] = arr[left], arr[l]
return l
def quick_sort(arr, left, right):
if left <= right:
pivot = partition(arr, left, right)
quick_sort(arr, left, pivot - 1)
quick_sort(arr, pivot + 1, right)
def main():
num = 20
range_left = 0
range_right = 10000
arr = [random.randint(range_left, range_right) for _ in range(num)]
print('Original array:')
print(arr)
quick_sort(arr, 0, len(arr) - 1)
print('Sorted array:')
print(arr)
if __name__ == '__main__':
main() | 3.890625 | 4 |
nyaa_updater.py | mhaidarh/nyaaupdater | 7 | 12793687 | <filename>nyaa_updater.py
#!/usr/bin/env python
"""
Nyaa.eu Feed Auto Updater
<NAME> (faizilham.com) 2013
"""
from nyaa_parser import fetch, download
from nyaa_db import NyaaSQLiteDB
from threading import Thread
import os, stat
DBNAME = '/home/pi/db/nyaa.db'
DOWNLOAD_DIR = '/home/pi/download/update/'
NUM_UPDATER = 4
NUM_DOWNLOADER = 4
class DownloadJob(Thread):
def __init__(self, links):
Thread.__init__(self)
self.links = links
def run(self):
for link in self.links:
filename, url = DOWNLOAD_DIR + link[1] + ".torrent", link[2]
download(url, filename)
os.chmod(filename, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
class UpdaterJob(Thread):
def __init__(self, db):
Thread.__init__(self)
self.db = db
def run(self):
self.links = []
self.updates = {}
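        # Walk each feed newest-first and queue entries until the previously seen
        # item (last) is reached; anything newer becomes a pending download.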
for series, val in self.db.items():
url, pattern, last = val[0], val[1], val[2]
feeds = fetch(url, pattern)
if (feeds):
n = 0
while(n < len(feeds) and feeds[n]['name'] != last):
self.links.append((series, feeds[n]['name'], feeds[n]['link'], feeds[n]['date']))
n = n + 1
if (n != 0):
self.updates[series] = [None, None, feeds[0]['name']]
def db_updates(db, links, updates):
db.update(updates) # update `series` table
# update `updates` table
conn = db.connect()
conn.execute('CREATE TABLE IF NOT EXISTS updates (id_update INTEGER PRIMARY KEY AUTOINCREMENT, series_name TEXT NOT NULL, filename TEXT NOT NULL, url TEXT NOT NULL, pubdate TEXT NOT NULL)')
conn.executemany('INSERT INTO updates(series_name, filename, url, pubdate) VALUES (?, ?, ?, ?)', links)
conn.commit()
db.close()
# divide .torrent downloads into NUM_DOWNLOADER threads
item_per_job = len(links) / NUM_DOWNLOADER + (0 if len(links) % NUM_DOWNLOADER == 0 else 1)
temp, jobs = [], []
n, total = 0, 0
for link in links:
temp.append(link)
n, total = n + 1, total + 1
if(n==item_per_job or total == len(links)):
n = 0
job = DownloadJob(temp)
jobs.append(job)
job.start()
temp = []
for job in jobs:
job.join()
# all download finish
def update(db):
data = db.load()
# divide check series updates into NUM_UPDATER threads
item_per_job = len(data) / NUM_UPDATER + (0 if len(data) % NUM_UPDATER == 0 else 1)
temp, jobs = {}, []
n, total = 0, 0
for key, value in data.items():
temp[key] = value
n = n + 1
total = total + 1
if (n==item_per_job or total == len(data)):
n = 0
job = UpdaterJob(temp)
jobs.append(job)
job.start()
temp = {}
links, updates = [], {}
for job in jobs:
job.join()
links = links + job.links
updates.update(job.updates)
# all series checked
return links, updates
if __name__ == "__main__":
db = NyaaSQLiteDB(DBNAME)
links, updates = update(db)
if (links):
print len(links), "new updates found"
db_updates(db, links, updates)
else:
db.close()
| 2.59375 | 3 |
data/data_split.py | SCUT-AILab/DTQ | 20 | 12793688 | <filename>data/data_split.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/13 16:57
# @Author : xiezheng
# @Site :
# @File : data_split.py
import os
from scipy.io import loadmat
import shutil
def get_path_str(line):
line = str(line)
_, path, _ = line.split('\'')
# print('line={}, path={}'.format(line, path))
return path
def path_replace(line):
return line.replace('/', '\\')
def copy_img(root, list, save_path):
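    # Copies every image referenced in the .mat file list from Images/<class>/<name>
    # into <save_path>/<class>/, creating the class directory if needed.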
for i in range(list.shape[0]):
print('i={}'.format(i))
path = get_path_str(list[i][0])
source_img_path = path_replace(os.path.join(root, 'Images', path))
dir_, name = path.split('/')
target_img_dir = path_replace(os.path.join(save_path, dir_))
if not os.path.exists(target_img_dir):
os.makedirs(target_img_dir)
target_img_path = path_replace(os.path.join(target_img_dir, name))
print('source_img_path={}, target_img_path={}'.format(source_img_path, target_img_path))
shutil.copy(source_img_path, target_img_path)
if __name__ == '__main__':
print()
root = '\Stanford Dogs 120'
train_list = loadmat(os.path.join(root, 'train_list.mat'))['file_list']
save_train_path = '\Stanford Dogs 120\\train'
copy_img(root, train_list, save_train_path)
# test_list = loadmat(os.path.join(root, 'test_list.mat'))['file_list']
# save_test_path = '\Stanford Dogs 120\\test'
# copy_img(root, test_list, save_test_path) | 2.796875 | 3 |
make_data.py | kazurayam/ks_LogViewerSlowsDownTests_HowToPrevent | 0 | 12793689 | print('ID')
for number in range(1000):
print("#" + format(number, '0>4'))
| 2.96875 | 3 |
scrap_phat.py | Astroua/LocalGroup-VLA | 1 | 12793690 |
# Scrap all the brick mosaics for PHAT
import os
from os.path import join as osjoin
output = "/home/ekoch/bigdata/ekoch/M31/PHAT/"
baseurl = "https://archive.stsci.edu/pub/hlsp/phat/"
# This is easier than webscraping right now.
brick_dict = {1: 12058,
2: 12073,
3: 12109,
4: 12107,
5: 12074,
6: 12105,
7: 12113,
8: 12075,
9: 12057,
10: 12111,
11: 12115,
12: 12071,
13: 12114,
14: 12072,
15: 12056,
16: 12106,
17: 12059,
18: 12108,
19: 12110,
20: 12112,
21: 12055,
22: 12076,
23: 12070}
for i in range(1, 24):
if i < 10:
brickurl = f"{baseurl}/brick0{i}"
acs_475 = f"hlsp_phat_hst_acs-wfc_{brick_dict[i]}-m31-b0{i}_f475w_v1_drz.fits"
acs_814 = f"hlsp_phat_hst_acs-wfc_{brick_dict[i]}-m31-b0{i}_f814w_v1_drz.fits"
wfcir_110 = f"hlsp_phat_hst_wfc3-ir_{brick_dict[i]}-m31-b0{i}_f110w_v1_drz.fits"
wfcir_160 = f"hlsp_phat_hst_wfc3-ir_{brick_dict[i]}-m31-b0{i}_f160w_v1_drz.fits"
wfcuv_275 = f"hlsp_phat_hst_wfc3-uvis_{brick_dict[i]}-m31-b0{i}_f275w_v1_drz.fits"
wfcuv_336 = f"hlsp_phat_hst_wfc3-uvis_{brick_dict[i]}-m31-b0{i}_f336w_v1_drz.fits"
else:
brickurl = f"{baseurl}/brick{i}"
acs_475 = f"hlsp_phat_hst_acs-wfc_{brick_dict[i]}-m31-b{i}_f475w_v1_drz.fits"
acs_814 = f"hlsp_phat_hst_acs-wfc_{brick_dict[i]}-m31-b{i}_f814w_v1_drz.fits"
wfcir_110 = f"hlsp_phat_hst_wfc3-ir_{brick_dict[i]}-m31-b{i}_f110w_v1_drz.fits"
wfcir_160 = f"hlsp_phat_hst_wfc3-ir_{brick_dict[i]}-m31-b{i}_f160w_v1_drz.fits"
wfcuv_275 = f"hlsp_phat_hst_wfc3-uvis_{brick_dict[i]}-m31-b{i}_f275w_v1_drz.fits"
wfcuv_336 = f"hlsp_phat_hst_wfc3-uvis_{brick_dict[i]}-m31-b{i}_f336w_v1_drz.fits"
print(f"Downloading brick {i}")
brick_path = osjoin(output, f"brick{i}")
if not os.path.exists(brick_path):
os.mkdir(brick_path)
os.chdir(brick_path)
for file in [acs_475, acs_814, wfcir_110, wfcir_160, wfcuv_275, wfcuv_336]:
# Check if we need to download again
if os.path.exists(file):
continue
os.system(f"wget {osjoin(brickurl, file)}")
# os.system(f"wget {osjoin(brickurl, acs_814)}")
# os.system(f"wget {osjoin(brickurl, wfcir_110)}")
# os.system(f"wget {osjoin(brickurl, wfcir_160)}")
# os.system(f"wget {osjoin(brickurl, wfcuv_275)}")
# os.system(f"wget {osjoin(brickurl, wfcuv_336)}")
| 2.375 | 2 |
test_pipeline/utils/visualize.py | Garvit-32/LPRNet-Pytorch | 0 | 12793691 | #Color dict
import numpy as np
# import wandb
import cv2
import torch
import os
colors = {
'0':[(128, 64, 128), (244, 35, 232), (0, 0, 230), (220, 190, 40), (70, 70, 70), (70, 130, 180), (0, 0, 0)],
'1':[(128, 64, 128), (250, 170, 160), (244, 35, 232), (230, 150, 140), (220, 20, 60), (255, 0, 0), (0, 0, 230), (255, 204, 54), (0, 0, 70), (220, 190, 40), (190, 153, 153), (174, 64, 67), (153, 153, 153), (70, 70, 70), (107, 142, 35), (70, 130, 180)],
'2':[(128, 64, 128), (250, 170, 160), (244, 35, 232), (230, 150, 140), (220, 20, 60), (255, 0, 0), (0, 0, 230), (119, 11, 32), (255, 204, 54), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 0, 90), (220, 190, 40), (102, 102, 156), (190, 153, 153), (180, 165, 180), (174, 64, 67), (220, 220, 0), (250, 170, 30), (153, 153, 153), (169, 187, 214), (70, 70, 70), (150, 100, 100), (107, 142, 35), (70, 130, 180)],
'3':[(128, 64, 128), (250, 170, 160), (81, 0, 81), (244, 35, 232), (230, 150, 140), (152, 251, 152), (220, 20, 60), (246, 198, 145), (255, 0, 0), (0, 0, 230), (119, 11, 32), (255, 204, 54), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100), (136, 143, 153), (220, 190, 40), (102, 102, 156), (190, 153, 153), (180, 165, 180), (174, 64, 67), (220, 220, 0), (250, 170, 30), (153, 153, 153), (153, 153, 153), (169, 187, 214), (70, 70, 70), (150, 100, 100), (150, 120, 90), (107, 142, 35), (70, 130, 180), (169, 187, 214), (0, 0, 142)]
}
def visualize(mask,n_classes,ignore_label,gt = None):
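    # Picks the smallest palette with enough colours for n_classes, paints every class
    # index with its RGB colour, and turns ignored / unmatched pixels white.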
if(n_classes<len(colors['0'])):
id = 0
elif(n_classes<len(colors['1'])):
id = 1
elif(n_classes<len(colors['2'])):
id = 2
else:
id = 3
out_mask = np.zeros((mask.shape[0],mask.shape[1],3))
for i in range(n_classes):
out_mask[mask == i] = colors[str(id)][i]
if(gt is not None):
out_mask[gt == ignore_label] = (255,255,255)
out_mask[np.where((out_mask == [0, 0, 0]).all(axis=2))] = (255,255,255)
return out_mask
def error_map(pred,gt,cfg):
canvas = pred.copy()
canvas[canvas == gt] = 255
canvas[gt == cfg.Loss.ignore_label] = 255
return canvas
# def segmentation_validation_visualization(epoch,sample,pred,batch_size,class_labels,wandb_image,cfg):
# os.makedirs(os.path.join(cfg.train.output_dir,'Visualization',str(epoch)),exist_ok = True)
# input = sample['image'].permute(0,2,3,1).detach().cpu().numpy()
# label = sample['label'].detach().cpu().numpy().astype(np.uint8)
# pred = torch.argmax(pred[0],dim = 1).detach().cpu().numpy().astype(np.uint8)
# for i in range(batch_size):
# errormap = error_map(pred[i],label[i],cfg)
# wandb_image.append(wandb.Image(cv2.resize(cv2.cvtColor(input[i], cv2.COLOR_BGR2RGB),(cfg.dataset.width//4,cfg.dataset.height//4)), masks={
# "predictions" : {
# "mask_data" : cv2.resize(pred[i],(cfg.dataset.width//4,cfg.dataset.height//4)),
# "class_labels" : class_labels
# },
# "ground_truth" : {
# "mask_data" : cv2.resize(label[i],(cfg.dataset.width//4,cfg.dataset.height//4)),
# "class_labels" : class_labels
# }
# ,
# "error_map" : {
# "mask_data" : cv2.resize(errormap,(cfg.dataset.width//4,cfg.dataset.height//4)),
# "class_labels" : class_labels
# }
# }))
# if(cfg.valid.write):
# prediction = visualize(pred[i],cfg.model.n_classes,cfg.Loss.ignore_label,gt = label[i])
# mask = visualize(label[i],cfg.model.n_classes,cfg.Loss.ignore_label,gt = label[i])
# out = np.concatenate([((input[i]* np.array(cfg.dataset.mean) + np.array(cfg.dataset.std))*255).astype(int),mask,prediction,visualize(errormap,cfg.model.n_classes,cfg.Loss.ignore_label,label[i])],axis = 1)
# cv2.imwrite(os.path.join(cfg.train.output_dir,'Visualization',str(epoch),sample['img_name'][i]),out)
# return wandb_image
| 2.40625 | 2 |
Sketches/RJL/bittorrent/BitTorrent/BitTorrent/LaunchPath.py | sparkslabs/kamaelia_orig | 12 | 12793692 | <reponame>sparkslabs/kamaelia_orig
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# LaunchPath -- a cross platform way to "open," "launch," or "start"
# files and directories
# written by <NAME>
import os
can_launch_files = False
posix_browsers = ('gnome-open','konqueror',) #gmc, gentoo only work on dirs
default_posix_browser = ''
def launchpath_nt(path):
os.startfile(path)
def launchpath_mac(path):
# BUG: this is untested
os.spawnlp(os.P_NOWAIT, 'open', 'open', path)
def launchpath_posix(path):
if default_posix_browser:
os.spawnlp(os.P_NOWAIT, default_posix_browser,
default_posix_browser, path)
def launchpath(path):
pass
def launchdir(path):
if os.path.isdir(path):
launchpath(path)
if os.name == 'nt':
can_launch_files = True
launchpath = launchpath_nt
elif os.name == 'mac':
can_launch_files = True
launchpath = launchpath_mac
elif os.name == 'posix':
for b in posix_browsers:
if os.system("which '%s' >/dev/null 2>&1" % b.replace("'","\\'")) == 0:
can_launch_files = True
default_posix_browser = b
launchpath = launchpath_posix
break
| 2.0625 | 2 |
python/mxnet/gluon/probability/transformation/domain_map.py | pioy/incubator-mxnet | 211 | 12793693 | <filename>python/mxnet/gluon/probability/transformation/domain_map.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Classes for registering and storing bijection/transformations from
unconstrained space to a given domain.
"""
from numbers import Number
from .transformation import (
ExpTransform, AffineTransform, SigmoidTransform, ComposeTransform)
from ..distributions.constraint import (Constraint, Positive, GreaterThan, GreaterThanEq,
LessThan, Interval, HalfOpenInterval)
__all__ = ['domain_map', 'biject_to', 'transform_to']
class domain_map():
"""
Abstract Class for registering and storing mappings from domain
to bijections/transformations
"""
def __init__(self):
# constraint -> constraint -> transformation
self._storage = {}
super(domain_map, self).__init__()
def register(self, constraint, factory=None):
"""Register a bijection/transformation from unconstrained space to the domain
specified by `constraint`.
Parameters
----------
constraint : Type or Object
A class of constraint or an object of constraint
factory : callable
A function that outputs a `transformation` given a `constraint`,
by default None.
"""
# Decorator mode
if factory is None:
return lambda factory: self.register(constraint, factory)
if isinstance(constraint, Constraint):
constraint = type(constraint)
if not isinstance(constraint, type) or not issubclass(constraint, Constraint):
raise TypeError('Expected constraint to be either a Constraint subclass or instance, '
'but got {}'.format(constraint))
self._storage[constraint] = factory
return factory
def __call__(self, constraint):
try:
factory = self._storage[type(constraint)]
except KeyError:
raise NotImplementedError(
'Cannot transform {} constraints'.format(type(constraint).__name__))
return factory(constraint)
biject_to = domain_map()
transform_to = domain_map()
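# Example: transform_to(Positive()) looks up the registered factory below and
# returns an ExpTransform, i.e. the map from the real line onto (0, inf).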
@biject_to.register(Positive)
@transform_to.register(Positive)
def _transform_to_positive(constraint):
# Although `constraint` is not used in this factory function,
# we decide to keep it for the purpose of consistency.
# pylint: disable=unused-argument
return ExpTransform()
@biject_to.register(GreaterThan)
@biject_to.register(GreaterThanEq)
@transform_to.register(GreaterThan)
@transform_to.register(GreaterThanEq)
def _transform_to_greater_than(constraint):
return ComposeTransform([ExpTransform(),
AffineTransform(constraint._lower_bound, 1)])
@biject_to.register(LessThan)
@transform_to.register(LessThan)
def _transform_to_less_than(constraint):
return ComposeTransform([ExpTransform(),
AffineTransform(constraint._upper_bound, -1)])
@biject_to.register(Interval)
@biject_to.register(HalfOpenInterval)
@transform_to.register(Interval)
@transform_to.register(HalfOpenInterval)
def _transform_to_interval(constraint):
# Handle the special case of the unit interval.
lower_is_0 = isinstance(constraint._lower_bound,
Number) and constraint._lower_bound == 0
upper_is_1 = isinstance(constraint._upper_bound,
Number) and constraint._upper_bound == 1
if lower_is_0 and upper_is_1:
return SigmoidTransform()
loc = constraint._lower_bound
scale = constraint._upper_bound - constraint._lower_bound
return ComposeTransform([SigmoidTransform(),
AffineTransform(loc, scale)])
| 1.96875 | 2 |
chains/front/network.py | thirionjl/chains | 2 | 12793694 | <filename>chains/front/network.py
import abc
from ..core import node_factory as f, initializers as init
from ..core.graph import Node, Graph
from ..core.static_shape import Dim
from ..utils import validate
from ..utils.naming import NameGenerator
class Network(abc.ABC):
def __init__(self):
self.cost_graph = None
self.predict_graph = None
self.inputs = None
self.labels = None
self.label_size = None
self.cnt_classes = None
def evaluate(self, x_test):
self.predict_graph.placeholders = {self.inputs: x_test}
return self.predict_graph.evaluate()
def evaluate_cost(self, x_train, y_train):
self.feed_cost_graph(x_train, y_train)
return self.cost_graph.evaluate()
def initialize_variables(self):
self.cost_graph.initialize_variables()
def feed_cost_graph(self, x_train, y_train):
self.cost_graph.placeholders = {self.inputs: x_train,
self.labels: y_train}
class Sequence(Network):
def __init__(self, cnt_features: int, layers, classifier,
regularizer=None):
super().__init__()
self.layer_names = NameGenerator()
self.cnt_features = Dim.of(cnt_features)
self.cnt_samples = Dim.unknown()
self.inputs = f.placeholder(
shape=(self.cnt_features, self.cnt_samples))
cost_graph, predict_graph, regularizable_vars = \
self.inputs, self.inputs, []
for pos, layer in enumerate(layers):
cost_graph, predict_graph, vars = layer.append(pos,
self.layer_names,
cost_graph,
predict_graph)
regularizable_vars.extend(vars)
self.cnt_classes = classifier.cnt_classes
self.label_size = classifier.label_size
self.labels = f.placeholder(
shape=(self.label_size, self.cnt_samples))
cost_graph, predict_graph = classifier.append(self.layer_names,
cost_graph,
predict_graph,
self.labels)
if regularizer is not None:
cost_graph = regularizer.append(self.layer_names, cost_graph,
regularizable_vars, self.inputs)
self.cost_graph = Graph(cost_graph)
self.predict_graph = Graph(predict_graph)
class SequenceElement(abc.ABC):
def __init__(self):
self.layer_name = None
self.var_name_generator = None
def new_var_name(self, prefix):
return self.var_name_generator.generate(prefix)
def prepare_names(self, name_generator: NameGenerator):
self.layer_name = name_generator.generate(
self.__class__.__name__).lower()
self.var_name_generator = NameGenerator(self.layer_name)
class Layer(SequenceElement, abc.ABC):
def append(self, pos: int, name_generator: NameGenerator,
logits: Node, labels: Node):
self.prepare_names(name_generator)
return self.do_append(pos, logits, labels)
@abc.abstractmethod
def do_append(self, pos: int, logits: Node, labels: Node):
pass
class Dense(Layer):
default_weight_initializer = init.HeInitializer()
default_bias_initializer = init.ZeroInitializer()
def __init__(self, neurons: int, weight_initializer=None,
bias_initializer=None):
self.neurons = neurons
self.weight_initializer = self.default_weight_initializer \
if weight_initializer is None else weight_initializer
self.bias_initializer = self.default_bias_initializer \
if bias_initializer is None else bias_initializer
def do_append(self, pos, cost_g, predict_g):
# TODO Allow different axis for the "features" and "examples" dimension
cnt_features = cost_g.shape[0]
w_name = self.new_var_name("W")
b_name = self.new_var_name("b")
w = f.var(w_name, self.weight_initializer,
shape=(self.neurons, cnt_features))
b = f.var(b_name, self.bias_initializer, shape=(self.neurons, 1))
cost_fc = f.fully_connected(cost_g, w, b, first_layer=(pos == 0),
name=self.layer_name)
predict_fc = f.fully_connected(predict_g, w, b, first_layer=(pos == 0),
name=self.layer_name + "_p")
return cost_fc, predict_fc, [w]
class ReLu(Layer):
def do_append(self, pos, cost_graph, predict_graph):
return f.relu(cost_graph, name=self.layer_name), \
f.relu(predict_graph, name=self.layer_name + "_p"), []
class LeakyReLu(Layer):
def do_append(self, pos, cost_graph, predict_graph):
return f.leaky_relu(cost_graph, name=self.layer_name), \
f.leaky_relu(predict_graph, name=self.layer_name + "_p"), \
[]
class Dropout(Layer):
def __init__(self, keep_prob=0.8):
if not (0 < keep_prob <= 1):
raise ValueError(f"Keep probability should be between 0 and 1")
self.keep_prob = keep_prob
def do_append(self, pos, cost_graph, predict_graph):
return f.dropout(self.keep_prob, cost_graph, name=self.layer_name), \
predict_graph, []
class BatchNorm(Layer):
default_beta_initializer = init.ZeroInitializer()
default_gamma_initializer = init.OneInitializer()
def __init__(self, beta_initializer=None,
gamma_initializer=None):
self.beta_initializer = self.default_beta_initializer \
if beta_initializer is None else beta_initializer
self.gamma_initializer = self.default_gamma_initializer \
if gamma_initializer is None else gamma_initializer
def do_append(self, pos, cost_graph, predict_graph):
cnt_features = cost_graph.shape[0]
beta_name = self.new_var_name("beta")
gamma_name = self.new_var_name("gamma")
beta = f.var(beta_name, self.beta_initializer, shape=(cnt_features, 1))
gamma = f.var(gamma_name, self.gamma_initializer,
shape=(cnt_features, 1))
bnt = f.batch_norm_train(cost_graph, beta, gamma, name=self.layer_name)
bnp = f.batch_norm_predict(bnt.op, predict_graph, beta, gamma,
name=self.layer_name + "_p")
return bnt, bnp, [beta, gamma]
class Classifier(SequenceElement, abc.ABC):
def __init__(self, label_size, cnt_classes):
self.layer_name = None
self.label_size = label_size
self.cnt_classes = cnt_classes
def append(self, name_generator, cost_graph: Node, predict_graph: Node,
labels: Node):
self.prepare_names(name_generator)
return self.do_append(cost_graph, predict_graph, labels)
@abc.abstractmethod
def do_append(self, cost_graph: Node, predict_graph: Node, labels: Node):
pass
class SigmoidBinaryClassifier(Classifier):
def __init__(self):
super().__init__(label_size=1, cnt_classes=2)
def do_append(self, cost_graph: Node,
predict_graph: Node, labels: Node):
sigmoid = f.sigmoid(predict_graph, name=self.layer_name + "_sigmoid")
cross_entropy = f.sigmoid_cross_entropy(cost_graph, labels,
name=self.layer_name)
gt = f.is_greater_than(sigmoid, 0.5, name=self.layer_name + "_gt")
return cross_entropy, gt
class SoftmaxClassifier(Classifier): # TODO axis
def __init__(self, classes: int):
validate.is_strictly_greater_than("classes", classes, 2)
super().__init__(label_size=classes, cnt_classes=classes)
def do_append(self, cost_graph: Node, predict_graph: Node, labels: Node):
entropy = f.softmax_cross_entropy(cost_graph, labels,
name=self.layer_name)
softmax = f.softmax(predict_graph, name=self.layer_name + "_softmax")
argmax = f.argmax(softmax, name=self.layer_name + "_argmax")
return entropy, argmax
class Regularizer(SequenceElement, abc.ABC):
def append(self, name_generator, cost_graph: Node, vars, inputs):
self.prepare_names(name_generator)
return self.do_append(cost_graph, vars, inputs)
@abc.abstractmethod
def do_append(self, cost_graph: Node, vars, inputs):
pass
class L2Regularizer(Regularizer):
def __init__(self, lambd=0.8):
self.lambd = lambd
def do_append(self, cost_graph: Node, vars, inputs):
dim = f.dim(inputs, name=self.layer_name + "_dim")
reg = f.l2_norm_regularizer(self.lambd, dim, vars,
name=self.layer_name)
if self.lambd > 0:
return cost_graph + reg
else:
return cost_graph
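# Illustrative usage sketch (hypothetical example; the import path, data and
# hyper-parameters below are assumptions, not part of this module):
#
#   import numpy as np
#   from chains.front.network import (Sequence, Dense, ReLu, Dropout,
#                                     SoftmaxClassifier, L2Regularizer)
#
#   net = Sequence(
#       cnt_features=784,
#       layers=[Dense(128), ReLu(), Dropout(keep_prob=0.9), Dense(10)],
#       classifier=SoftmaxClassifier(classes=10),
#       regularizer=L2Regularizer(lambd=0.01),
#   )
#   net.initialize_variables()
#   cost = net.evaluate_cost(x_train, y_train)   # x_train: (784, m), y_train: (10, m) one-hot
#   predictions = net.evaluate(x_test)           # argmax class index per column of x_test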
| 2.625 | 3 |
tests/browser/pages/external/events_registration.py | mayank-sfdc/directory-tests | 4 | 12793695 | <filename>tests/browser/pages/external/events_registration.py
# -*- coding: utf-8 -*-
"""Event Registration Page Object."""
from selenium.webdriver.remote.webdriver import WebDriver
from directory_tests_shared import URLs
from directory_tests_shared.enums import PageType, Service
from directory_tests_shared.utils import check_url_path_matches_template
NAME = "Registration"
SERVICE = Service.EVENTS
TYPE = PageType.FORM
URL = URLs.EVENTS_REGISTRATION.absolute_template
SELECTORS = {}
def should_be_here(driver: WebDriver):
check_url_path_matches_template(URL, driver.current_url)
| 2.015625 | 2 |
server/empous/utils.py | Slruh/Empous-Control-The-World | 1 | 12793696 | <reponame>Slruh/Empous-Control-The-World
#Takes in a list of games
def dictify_games(games, empous_user):
game_list = dict()
for game in games:
game_stats = dict()
game_stats["id"] = game.id
if game.victor == empous_user:
game_stats['isVictor'] = "yes"
else:
game_stats['isVictor'] = "no"
if game.current_player == empous_user:
game_stats['isTurn'] = "yes"
else:
game_stats['isTurn'] = "no"
game_stats['current_player'] = game.current_player.first_name
#Go through the enemies to give more info
players = list()
for player in game.players.all():
if player != empous_user:
players.append(player.first_name)
game_stats['enemies'] = players
game_stats['screenshot_url'] = game.screenshot_file.url
game_stats['json_state'] = game.json_serialized_game
game_list[game.id] = game_stats
return game_list
def join_with_commas_with_and(string_list):
if len(string_list) == 1:
return "".join(string_list)
elif len(string_list) == 2:
return " and ".join(string_list)
else:
joined_string = ", ".join(string_list[:-1])
joined_string = joined_string + ", and " + string_list[-1]
return joined_string | 2.96875 | 3 |
lnxproc/netsnmp.py | eccles/lnxproc | 1 | 12793697 | '''
Contains the NetSnmp() class
Typical contents of file /proc/net/snmp::
Ip: Forwarding DefaultTTL InReceives InHdrErrors InAddrErrors ForwDatagrams
InUnknownProtos InDiscards InDelivers OutRequests OutDiscards OutNoRoutes
ReasmTimeout ReasmReqds ReasmOKs ReasmFails FragOKs FragFails FragCreates
Ip: 1 64 2354322 0 0 0 0 0 2282006 2066446 0 0 0 0 0 0 0 0 0
Icmp: InMsgs InErrors InDestUnreachs InTimeExcds InParmProbs InSrcQuenchs
InRedirects InEchos InEchoReps InTimestamps InTimestampReps InAddrMasks
InAddrMaskReps OutMsgs OutErrors OutDestUnreachs OutTimeExcds
OutParmProbs OutSrcQuenchs OutRedirects OutEchos OutEchoReps
OutTimestamps OutTimestampReps OutAddrMasks OutAddrMaskReps
Icmp: 172 0 91 0 0 0 0 81 0 0 0 0 0 168 0 87 0 0 0 0 0 81 0 0 0 0
IcmpMsg: InType3 InType8 OutType0 OutType3
IcmpMsg: 91 81 81 87
Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens PassiveOpens AttemptFails
EstabResets CurrEstab InSegs OutSegs RetransSegs InErrs OutRsts
Tcp: 1 200 120000 -1 70054 4198 337 2847 43 1880045 1741596 7213 0 3044
Udp: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors
Udp: 344291 8 376 317708 0 0
UdpLite: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors
UdpLite: 0 0 0 0 0 0
'''
from logging import getLogger
from os import path as ospath
from .readfile import ReadFile
LOGGER = getLogger(__name__)
class NetSnmp(ReadFile):
'''
NetSnmp handling
'''
FILENAME = ospath.join('proc', 'net', 'snmp')
KEY = 'netsnmp'
def normalize(self):
'''
Translates data into dictionary
The net/snmp file is a series of records keyed on subcategories
'''
LOGGER.debug("Normalize")
lines = self.lines
ret = {}
fkey = ''
fvals = []
for i, line in enumerate(lines):
top, tail = line.split(':')
key = top.lstrip()
vals = tail.lstrip().split()
if i % 2:
if fkey == key:
ret[key] = dict(
zip(
fvals,
[int(val) for val in vals]
)
)
else:
fkey = key
fvals = vals
return ret
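# Illustrative sketch of the normalized result, using the sample file shown in
# the module docstring: each protocol name maps to a dict of counter names to
# integer values, e.g.
#
#   {
#       'Ip':   {'Forwarding': 1, 'DefaultTTL': 64, 'InReceives': 2354322, ...},
#       'Icmp': {'InMsgs': 172, 'InErrors': 0, 'InDestUnreachs': 91, ...},
#       'Udp':  {'InDatagrams': 344291, 'NoPorts': 8, 'InErrors': 376, ...},
#   }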
| 1.773438 | 2 |
models/cae.py | TimCJanke/ImplicitGenerativeCopulas | 0 | 12793698 | """
@author: <NAME>, Energy Information Networks & Systems @ TU Darmstadt
"""
import numpy as np
import tensorflow as tf
from models.igc import ImplicitGenerativeCopula, GMMNCopula
from models.utils import cdf_interpolator
import pyvinecopulib as pv
from models import mv_copulas
import matplotlib.pyplot as plt
class CopulaAutoEncoder(object):
def __init__(self, x, ae_model):
if isinstance(ae_model, str):
ae_model = tf.keras.models.load_model(ae_model)
self.encoder_model = ae_model.encoder
self.decoder_model = ae_model.decoder
self.z = self._encode(x)
self.margins = self._fit_margins(self.z)
self.u = self._cdf(self.z)
def _encode(self, x):
# encode images to latent space
return self.encoder_model(x).numpy()
def _decode(self, z):
# decode latent space samples to images
return self.decoder_model(z).numpy()
def _cdf(self, z):
# get pseudo obs
u = np.zeros_like(z)
for i in range(u.shape[1]):
u[:,i] = self.margins[i].cdf(z[:,i])
return u
def _ppf(self, u):
# inverse marginal cdf
z = np.zeros_like(u)
for i in range(z.shape[1]):
z[:,i] = self.margins[i].ppf(u[:,i])
return z
def _fit_margins(self, z):
# get the marginal distributions via ecdf interpolation
margins = []
for i in range(z.shape[1]):
margins.append(cdf_interpolator(z[:,i],
kind="linear",
x_min=np.min(z[:,i])-np.diff(np.sort(z[:,i])[0:2])[0],
x_max=np.max(z[:,i])+np.diff(np.sort(z[:,i])[-2:])[0]))
return margins
def _sample_u(self, n_samples=1):
# sample from copula
return self.copula.simulate(n_samples)
def _sample_z(self, n_samples=1, u=None):
# sample from latent space
if u is None:
return self._ppf(self._sample_u(n_samples))
else:
return self._ppf(u)
def sample_images(self, n_samples=1, z=None):
# sample an image
if z is None:
return self._decode(self._sample_z(n_samples))
else:
return self._decode(z)
def show_images(self, n=5, imgs=None, cmap="gray", title=None):
if imgs is None:
imgs = self.sample_images(n)
plt.figure(figsize=(16, 3))
for i in range(n):
ax = plt.subplot(1, n, i+1)
plt.imshow(np.squeeze(imgs[i]*255), vmin=0, vmax=255, cmap=cmap)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.suptitle(title)
plt.tight_layout()
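# Hypothetical usage sketch (the training data, autoencoder path and epoch
# count are assumptions, not part of this module):
#
#   cae = IGCAutoEncoder(x_train, ae_model="models/autoencoder/my_ae")  # hypothetical path
#   cae.fit(epochs=50, batch_size=128)
#   cae.show_images(n=5, title="IGC-CAE samples")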
class IGCAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with Implicit Generative Copula """
def fit(self, epochs=100, batch_size=100, n_samples_train=200, regen_noise=1000000, validation_split=0.0, validation_data=None):
if validation_data is not None:
u_test = self._cdf((self._encode(validation_data)))
else:
u_test = None
#self.copula = ImplicitGenerativeCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2)
self.copula = ImplicitGenerativeCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2, n_layers=3, n_neurons=200)
hist = self.copula.fit(self.u, epochs=epochs, batch_size=batch_size, validation_data=u_test, regen_noise=regen_noise, validation_split=0.0)
return hist
def save_copula_model(self, path):
self.copula.save_model(path)
def load_copula_model(self, path, n_samples_train=200):
self.copula = ImplicitGenerativeCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2)
self.copula.load_model(path)
print("Loaded saved copula model.")
class GMMNCopulaAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with GMMN Copula """
def fit(self, epochs=100, batch_size=100, n_samples_train=200, regen_noise=10000000, validation_split=0.0, validation_data=None):
if validation_data is not None:
u_test = self._cdf((self._encode(validation_data)))
else:
u_test = None
#self.copula = GMMNCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2)
self.copula = GMMNCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2, n_layers=3, n_neurons=200)
hist = self.copula.fit(self.u, epochs=epochs, batch_size=batch_size, validation_data=u_test, regen_noise=regen_noise, validation_split=0.0)
return hist
def save_copula_model(self, path):
self.copula.save_model(path)
def load_copula_model(self, path, n_samples_train=200):
self.copula = GMMNCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2)
self.copula.load_model(path)
print("Loaded saved copula model.")
class VineCopulaAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with Vine Copula """
def fit(self, families="nonparametric", show_trace=False, trunc_lvl=18446744073709551615):
if families == "nonparametric":
controls = pv.FitControlsVinecop(family_set=[pv.BicopFamily.tll], trunc_lvl=trunc_lvl, show_trace=show_trace)
elif families == "parametric":
controls = pv.FitControlsVinecop(family_set=[pv.BicopFamily.indep,
pv.BicopFamily.gaussian,
pv.BicopFamily.student,
pv.BicopFamily.clayton,
pv.BicopFamily.gumbel,
pv.BicopFamily.frank,
pv.BicopFamily.joe,
pv.BicopFamily.bb1,
pv.BicopFamily.bb6,
pv.BicopFamily.bb7,
pv.BicopFamily.bb8],
trunc_lvl=trunc_lvl,
show_trace=show_trace)
else:
controls = pv.FitControlsVinecop(trunc_lvl=trunc_lvl, show_trace=show_trace)
self.copula = pv.Vinecop(data=self.u, controls=controls)
def save_model(self, path):
self.copula.to_json(path)
print(f"Saved vine copula model to {path}.")
def load_model(self, path):
self.copula = pv.Vinecop(filename=path)
print("Loaded vine copula model.")
class GaussianCopulaAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with Gaussian Copula """
def fit(self):
self.copula = mv_copulas.GaussianCopula()
self.copula.fit(self.u)
class IndependenceCopulaCopulaAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with Independence Copula """
def fit(self):
pass
def _sample_u(self, n_samples):
return np.random.uniform(0.0, 1.0, size=(n_samples, self.u.shape[1]))
class VariationalAutoEncoder(object):
def __init__(self, decoder_model="models/autoencoder/VAE_decoder_fashion_mnist_100epochs", latent_dim=25):
if isinstance(decoder_model, str):
self.decoder_model = tf.keras.models.load_model(decoder_model)
else:
self.decoder_model = decoder_model
self.decoder_model.compile()
        self.latent_dim = latent_dim
def _sample_z(self, n_samples):
# sample from latent space
return np.random.normal(loc=0.0, scale=1.0, size=(n_samples, self.latent_dim))
def _decode(self,z):
return self.decoder_model.predict(z)
def fit(self):
pass
def sample_images(self, n_samples):
# sample an image
return self._decode(self._sample_z(n_samples))
def show_images(self, n=5, imgs=None, cmap="gray", title=None):
if imgs is None:
imgs = self.sample_images(n)
plt.figure(figsize=(16, 3))
for i in range(n):
ax = plt.subplot(1, n, i+1)
plt.imshow(np.squeeze(imgs[i]*255), vmin=0, vmax=255, cmap=cmap)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.suptitle(title)
plt.tight_layout() | 2.34375 | 2 |
Examples/Actor.py | VincentsCode/Smart-Room-Projekt | 0 | 12793699 | import socket
import Constants
# Create a TCP/IP socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(('', 1339))
server_socket.listen(1)
while True:
    connection, client_address = server_socket.accept()
print('connection from', client_address)
while True:
# noinspection PyBroadException
try:
data = connection.recv(16)
msg = str(data, "utf8")
msg = msg.replace("#", "")
print(msg)
connection.sendall(bytes(Constants.ANSWER_POSITIVE, "utf8"))
except:
break
| 2.96875 | 3 |
pytools/lib/readqc_report.py | virtualparadox/bbmap | 134 | 12793700 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Readqc report: record stat key-value in readqc-stats.txt
### JGI_Analysis_Utility_Illumina::illumina_read_level_report
Created: Jul 24 2013
sulsj (<EMAIL>)
"""
import os
import sys
## custom libs in "../lib/"
srcDir = os.path.dirname(__file__)
sys.path.append(os.path.join(srcDir, 'tools')) ## ./tools
sys.path.append(os.path.join(srcDir, '../lib')) ## rqc-pipeline/lib
sys.path.append(os.path.join(srcDir, '../tools')) ## rqc-pipeline/tools
from readqc_constants import RQCReadQcConfig, ReadqcStats
from rqc_constants import RQCExitCodes
from os_utility import run_sh_command
from common import append_rqc_stats, append_rqc_file
statsFile = RQCReadQcConfig.CFG["stats_file"]
filesFile = RQCReadQcConfig.CFG["files_file"]
"""
Title : read_megablast_hits
Function : This function generates tophit list of megablast against different databases.
Usage : read_megablast_hits(db_name, log)
Args : blast db name or full path
Returns : SUCCESS
FAILURE
Comments :
"""
def read_megablast_hits(db, log):
currentDir = RQCReadQcConfig.CFG["output_path"]
megablastDir = "megablast"
megablastPath = os.path.join(currentDir, megablastDir)
statsFile = RQCReadQcConfig.CFG["stats_file"]
filesFile = RQCReadQcConfig.CFG["files_file"]
##
## Process blast output files
##
matchings = 0
hitCount = 0
parsedFile = os.path.join(megablastPath, "megablast.*.%s*.parsed" % (db))
matchings, _, exitCode = run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l " % (parsedFile), True, log)
if exitCode == 0: ## if parsed file found.
t = matchings.split()
if len(t) == 1 and t[0].isdigit():
hitCount = int(t[0])
append_rqc_stats(statsFile, ReadqcStats.ILLUMINA_READ_MATCHING_HITS + " " + db, hitCount, log)
##
## add .parsed file
##
parsedFileFound, _, exitCode = run_sh_command("ls %s" % (parsedFile), True, log)
if parsedFileFound:
parsedFileFound = parsedFileFound.strip()
append_rqc_file(filesFile, ReadqcStats.ILLUMINA_READ_PARSED_FILE + " " + db, os.path.join(megablastPath, parsedFileFound), log)
else:
log.error("- Failed to add megablast parsed file of %s." % (db))
return RQCExitCodes.JGI_FAILURE
##
## wc the top hits
##
topHit = 0
tophitFile = os.path.join(megablastPath, "megablast.*.%s*.parsed.tophit" % (db))
tophits, _, exitCode = run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l " % (tophitFile), True, log)
t = tophits.split()
if len(t) == 1 and t[0].isdigit():
topHit = int(t[0])
append_rqc_stats(statsFile, ReadqcStats.ILLUMINA_READ_TOP_HITS + " " + db, topHit, log)
##
## wc the taxonomic species
##
spe = 0
taxlistFile = os.path.join(megablastPath, "megablast.*.%s*.parsed.taxlist" % (db))
species, _, exitCode = run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l " % (taxlistFile), True, log)
t = species.split()
if len(t) == 1 and t[0].isdigit():
spe = int(t[0])
append_rqc_stats(statsFile, ReadqcStats.ILLUMINA_READ_TAX_SPECIES + " " + db, spe, log)
##
## wc the top 100 hit
##
top100hits = 0
top100hitFile = os.path.join(megablastPath, "megablast.*.%s*.parsed.top100hit" % (db))
species, _, exitCode = run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l " % (top100hitFile), True, log)
t = species.split()
if len(t) == 1 and t[0].isdigit():
top100hits = int(t[0])
append_rqc_stats(statsFile, ReadqcStats.ILLUMINA_READ_TOP_100HITS + " " + db, top100hits, log)
##
## Find and add taxlist file
##
taxListFound, _, exitCode = run_sh_command("ls %s" % (taxlistFile), True, log)
taxListFound = taxListFound.strip()
if taxListFound:
append_rqc_file(filesFile, ReadqcStats.ILLUMINA_READ_TAXLIST_FILE + " " + db, os.path.join(megablastPath, taxListFound), log)
else:
log.error("- Failed to add megablast taxlist file of %s." % (db))
return RQCExitCodes.JGI_FAILURE
##
## Find and add tophit file
##
tophitFound, _, exitCode = run_sh_command("ls %s" % (tophitFile), True, log)
tophitFound = tophitFound.strip()
if tophitFound:
append_rqc_file(filesFile, ReadqcStats.ILLUMINA_READ_TOPHIT_FILE + " " + db, os.path.join(megablastPath, tophitFound), log)
else:
log.error("- Failed to add megablast tophit file of %s." % (db))
return RQCExitCodes.JGI_FAILURE
##
## Find and add top100hit file
##
top100hitFound, _, exitCode = run_sh_command("ls %s" % (top100hitFile), True, log)
top100hitFound = top100hitFound.strip()
if top100hitFound:
append_rqc_file(filesFile, ReadqcStats.ILLUMINA_READ_TOP100HIT_FILE + " " + db, os.path.join(megablastPath, top100hitFound), log)
else:
log.error("- Failed to add megablast top100hit file of %s." % (db))
return RQCExitCodes.JGI_FAILURE
else:
log.info("- No blast hits for %s." % (db))
return RQCExitCodes.JGI_SUCCESS
"""
Title : read_level_qual_stats
Function : Generate qual scores and plots of read level 20mer sampling
Usage : read_level_mer_sampling($analysis, $summary_file_dir)
Args : 1) A reference to an JGI_Analysis object
2) current working folder wkdir/uniqueness
Returns : JGI_SUCCESS: Illumina read level report could be successfully generated.
JGI_FAILURE: Illumina read level report could not be generated.
Comments : This function is intended to be called at the very end of the illumina read level data processing script.
"""
def read_level_mer_sampling(dataToRecordDict, dataFile, log):
retCode = RQCExitCodes.JGI_FAILURE
## Old data
#nSeq nStartUniMer fracStartUniMer nRandUniMer fracRandUniMer
## 0 1 2 3 4
##25000 2500 0.1 9704 0.3882
## New data
#count first rand first_cnt rand_cnt
# 0 1 2 3 4
#25000 66.400 76.088 16600 19022
#50000 52.148 59.480 13037 14870
#75000 46.592 53.444 11648 13361
#100000 43.072 49.184 10768 12296 ...
if os.path.isfile(dataFile):
with open(dataFile, "r") as merFH:
lines = merFH.readlines()
## last line
t = lines[-1].split('\t')
# breaks 2016-09-07
#assert len(t) == 5
totalMers = int(t[0])
## new by bbcountunique
uniqStartMerPer = float("%.2f" % (float(t[1])))
uniqRandtMerPer = float("%.2f" % (float(t[2])))
dataToRecordDict[ReadqcStats.ILLUMINA_READ_20MER_SAMPLE_SIZE] = totalMers
dataToRecordDict[ReadqcStats.ILLUMINA_READ_20MER_PERCENTAGE_STARTING_MERS] = uniqStartMerPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_20MER_PERCENTAGE_RANDOM_MERS] = uniqRandtMerPer
retCode = RQCExitCodes.JGI_SUCCESS
else:
log.error("- qhist file not found: %s" % (dataFile))
return retCode
"""
Title : base_level_qual_stats
Function : Generate qual scores and plots of read level QC
Usage : base_level_qual_stats($analysis, $)
Args : 1) A reference to an JGI_Analysis object
2) current working folder wkdir/qual
Returns : JGI_SUCCESS: Illumina read level report could be successfully generated.
JGI_FAILURE: Illumina read level report could not be generated.
Comments : This function is intended to be called at the very end of the illumina base level data processing script.
"""
def base_level_qual_stats(dataToRecordDict, reformatObqhistFile, log):
cummlatPer = 0
cummlatBase = 0
statsPerc = {30:0, 25:0, 20:0, 15:0, 10:0, 5:0}
statsBase = {30:0, 25:0, 20:0, 15:0, 10:0, 5:0}
Q30_seen = 0
Q25_seen = 0
Q20_seen = 0
Q15_seen = 0
Q10_seen = 0
Q5_seen = 0
## New format
##Median 38
##Mean 37.061
##STDev 4.631
##Mean_30 37.823
##STDev_30 1.699
##Quality bases fraction
#0 159 0.00008
#1 0 0.00000
#2 12175 0.00593
#3 0 0.00000
#4 0 0.00000
#5 0 0.00000
#6 0 0.00000
allLines = open(reformatObqhistFile).readlines()
for l in allLines[::-1]:
l = l.strip()
##
## obqhist file format example
##
# #Median 36
# #Mean 33.298
# #STDev 5.890
# #Mean_30 35.303
# #STDev_30 1.517
# #Quality bases fraction
# 0 77098 0.00043
# 1 0 0.00000
# 2 0 0.00000
# 3 0 0.00000
# 4 0 0.00000
# 5 0 0.00000
# 6 0 0.00000
if len(l) > 0:
if l.startswith("#"):
if l.startswith("#Mean_30"):
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q30_SCORE_MEAN] = l.split('\t')[1]
elif l.startswith("#STDev_30"):
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q30_SCORE_STD] = l.split('\t')[1]
elif l.startswith("#Mean"):
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_OVERALL_BASES_Q_SCORE_MEAN] = l.split('\t')[1]
elif l.startswith("#STDev"):
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_OVERALL_BASES_Q_SCORE_STD] = l.split('\t')[1]
continue
qavg = None
nbase = None
percent = None
t = l.split()
try:
qavg = int(t[0])
nbase = int(t[1])
percent = float(t[2])
except IndexError:
log.warn("parse error in base_level_qual_stats: %s %s %s %s" % (l, qavg, nbase, percent))
continue
log.debug("base_level_qual_stats(): qavg and nbase and percent: %s %s %s" % (qavg, nbase, percent))
cummlatPer += percent * 100.0
cummlatPer = float("%.f" % (cummlatPer))
if cummlatPer > 100:
cummlatPer = 100.0 ## RQC-621
cummlatBase += nbase
if qavg == 30:
Q30_seen = 1
statsPerc[30] = cummlatPer
statsBase[30] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q30] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C30] = cummlatBase
elif qavg == 25:
Q25_seen = 1
statsPerc[25] = cummlatPer
statsBase[25] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q25] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C25] = cummlatBase
elif qavg == 20:
Q20_seen = 1
statsPerc[20] = cummlatPer
statsBase[20] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q20] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C20] = cummlatBase
elif qavg == 15:
Q15_seen = 1
statsPerc[15] = cummlatPer
statsBase[15] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q15] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C15] = cummlatBase
elif qavg == 10:
Q10_seen = 1
statsPerc[10] = cummlatPer
statsBase[10] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q10] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C10] = cummlatBase
elif qavg == 5:
Q5_seen = 1
statsPerc[5] = cummlatPer
statsBase[5] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q5] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C5] = cummlatBase
## Double check that no value is missing.
if Q25_seen == 0 and Q30_seen != 0:
Q25_seen = 1
statsPerc[25] = statsPerc[30]
statsBase[25] = statsBase[30]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q25] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C25] = cummlatBase
if Q20_seen == 0 and Q25_seen != 0:
Q20_seen = 1
statsPerc[20] = statsPerc[25]
statsBase[20] = statsBase[25]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q20] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C20] = cummlatBase
if Q15_seen == 0 and Q20_seen != 0:
Q15_seen = 1
statsPerc[15] = statsPerc[20]
statsBase[15] = statsBase[20]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q15] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C15] = cummlatBase
if Q10_seen == 0 and Q15_seen != 0:
Q10_seen = 1
statsPerc[10] = statsPerc[15]
statsBase[10] = statsBase[15]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q10] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C10] = cummlatBase
if Q5_seen == 0 and Q10_seen != 0:
Q5_seen = 1
statsPerc[5] = statsPerc[10]
statsBase[5] = statsBase[10]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q5] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C5] = cummlatBase
if Q30_seen == 0:
log.error("Q30 is 0. Base quality values are ZERO.")
log.debug("Q and C values: %s" % (dataToRecordDict))
return RQCExitCodes.JGI_SUCCESS
"""
Title : q20_score
Function : this method returns Q20 using a qrpt file as input
Usage : JGI_QC_Utility::qc20_score($qrpt)
Args : $_[0] : qrpt file.
Returns : a number of Q20 score
Comments :
"""
# def q20_score(qrpt, log):
# log.debug("qrpt file %s" % (qrpt))
#
# q20 = None
# num = 0
#
# if os.path.isfile(qrpt):
# with open(qrpt, "r") as qrptFH:
# for l in qrptFH:
# num += 1
#
# if num == 1:
# continue
#
# ##############
# ## Old format
# ## READ1.qrpt
# ## column count min max sum mean Q1 med Q3 IQR lW rW A_Count C_Count G_Count T_Count N_Count Max_count
# ## 1 378701 2 34 12447306 32.87 31 34 34 3 27 34 108573 83917 81999 104127 85 378701
# ## 2 378701 2 34 12515957 33.05 33 34 34 1 32 34 112178 83555 84449 98519 0 378701
# ## 3 378701 2 34 12519460 33.06 33 34 34 1 32 34 104668 72341 80992 120700 0 378701
# ## 4 378701 2 37 13807944 36.46 37 37 37 0 37 37 96935 95322 83958 102440 46 378701
# ## 5 378701 2 37 13790443 36.42 37 37 37 0 37 37 114586 68297 78020 117740 58 378701
# ##
# ## or
# ##
# ## READ2.qrpt
# ## column count min max sum mean Q1 med Q3 IQR lW rW A_Count C_Count G_Count T_Count N_Count Max_count
# ## 1 378701 2 34 8875097 23.44 25 26 28 3 21 32 106904 84046 81795 105956 0 378701
# ## 2 378701 2 34 6543224 17.28 15 16 26 11 2 34 107573 77148 97953 88998 7029 378701
# ## 3 378701 2 34 7131741 18.83 16 16 26 10 2 34 96452 83003 107891 91355 0 378701
# ## 4 378701 2 37 9686653 25.58 19 32 33 14 2 37 97835 78304 87944 114618 0 378701
# ## 5 378701 2 37 10208226 26.96 25 33 35 10 10 37 98021 90611 89040 101029 0 378701
#
# pos = None
# mean = None
# t = l.split("\t")
# assert len(t) > 6
# pos = int(t[0])
# mean = float(t[5])
#
# if mean and pos:
# if mean < 20:
# return pos - 1
# else:
# q20 = pos
#
# else:
# log.error("- qhist file not found: %s" % (qrpt))
# return None
#
#
# return q20
def q20_score_new(bqHist, readNum, log):
log.debug("q20_score_new(): bqHist file = %s" % (bqHist))
q20 = None
if os.path.isfile(bqHist):
with open(bqHist, "r") as qrptFH:
for l in qrptFH:
if l.startswith('#'):
continue
## New data
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
##BaseNum count_1 min_1 max_1 mean_1 Q1_1 med_1 Q3_1 LW_1 RW_1 count_2 min_2 max_2 mean_2 Q1_2 med_2 Q3_2 LW_2 RW_2
# 0 6900 0 36 33.48 33 34 34 29 36 6900 0 36 33.48 33 34 34 29 36
pos = None
mean = None
t = l.split("\t")
pos = int(t[0]) + 1
if readNum == 1:
mean = float(t[4])
else:
mean = float(t[13])
if mean and pos:
if mean < 20:
return pos - 1
else:
q20 = pos
else:
log.error("- bqHist file not found: %s" % (bqHist))
return None
return q20
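# Illustrative note: q20_score_new() walks the per-position rows of the bqHist
# file and returns the last 1-based position whose mean quality (column 4 for
# read 1, column 13 for read 2) is still >= 20.  For example, rows with means
# 33.4, 28.1, 19.6 at positions 1, 2, 3 would give a Q20 read length of 2.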
"""
Title : read_level_qual_stats
Function : Generate qual scores and plots of read level QC
Usage : read_level_qual_stats($analysis, $)
Args : 1) A reference to an JGI_Analysis object
2) current working folder wkdir/qual
Returns : JGI_SUCCESS: Illumina read level report could be successfully generated.
JGI_FAILURE: Illumina read level report could not be generated.
Comments : This function is intended to be called at the very end of the illumina read level data processing script.
"""
def read_level_qual_stats(dataToRecordDict, qhistTxtFullPath, log):
retCode = RQCExitCodes.JGI_FAILURE
cummlatPer = 0.0
Q30_seen = 0
Q25_seen = 0
Q20_seen = 0
Q15_seen = 0
Q10_seen = 0
Q5_seen = 0
if os.path.isfile(qhistTxtFullPath):
stats = {30:0, 25:0, 20:0, 15:0, 10:0, 5:0}
allLines = open(qhistTxtFullPath).readlines()
for l in allLines[::-1]:
if not l:
break
if l.startswith('#'):
continue
t = l.split()
assert len(t) == 3
qavg = int(t[0])
percent = float(t[2]) * 100.0 ## 20140826 Changed for bbtools
cummlatPer = cummlatPer + percent
cummlatPer = float("%.2f" % cummlatPer)
if qavg <= 30 and qavg > 25 and Q30_seen == 0:
Q30_seen = 1
stats[30] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q30] = cummlatPer
elif qavg <= 25 and qavg > 20 and Q25_seen == 0:
Q25_seen = 1
stats[25] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q25] = cummlatPer
elif qavg <= 20 and qavg > 15 and Q20_seen == 0:
Q20_seen = 1
stats[20] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q20] = cummlatPer
elif qavg <= 15 and qavg > 10 and Q15_seen == 0:
Q15_seen = 1
stats[15] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q15] = cummlatPer
elif qavg <= 10 and qavg > 5 and Q10_seen == 0:
Q10_seen = 1
stats[10] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q10] = cummlatPer
elif qavg <= 5 and Q5_seen == 0:
Q5_seen = 1
stats[5] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q5] = cummlatPer
### Double check that no value is missing.
if Q25_seen == 0 and Q30_seen != 0:
Q25_seen = 1
stats[25] = stats[30]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q25] = cummlatPer
if Q20_seen == 0 and Q25_seen != 0:
Q20_seen = 1
stats[20] = stats[25]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q20] = cummlatPer
if Q15_seen == 0 and Q20_seen != 0:
Q15_seen = 1
stats[15] = stats[20]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q15] = cummlatPer
if Q10_seen == 0 and Q15_seen != 0:
Q10_seen = 1
stats[10] = stats[15]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q10] = cummlatPer
if Q5_seen == 0 and Q10_seen != 0:
Q5_seen = 1
stats[5] = stats[10]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q5] = cummlatPer
if Q30_seen == 0:
log.error("Q30 is 0 . Read quality values are ZERO.")
log.debug("Q30 %s, Q25 %s, Q20 %s, Q15 %s, Q10 %s, Q5 %s" % \
(stats[30], stats[25], stats[20], stats[15], stats[10], stats[5]))
retCode = RQCExitCodes.JGI_SUCCESS
else:
log.error("- qhist file not found: %s" % (qhistTxtFullPath))
return retCode
"""
Title : read_gc_mean
Function : This function generates average GC content % and its standard deviation and put them into database.
Usage : read_gc_mean($analysis)
Args : 1) A reference to an JGI_Analysis object
Returns : JGI_SUCCESS:
JGI_FAILURE:
Comments :
"""
def read_gc_mean(histFile, log):
mean = 0.0
stdev = 0.0
retCode = RQCExitCodes.JGI_FAILURE
if os.path.isfile(histFile):
with open(histFile, "r") as histFH:
line = histFH.readline() ## we only need the first line
# Ex) #Found 1086 total values totalling 420.3971. <0.387106 +/- 0.112691>
if len(line) == 0 or not line.startswith("#Found"):
log.error("- GC content hist text file does not contains right results: %s, %s" % (histFile, line))
retCode = RQCExitCodes.JGI_FAILURE
else:
toks = line.split()
assert len(toks) == 9
mean = float(toks[6][1:]) * 100.0
stdev = float(toks[8][:-1]) * 100.0
log.debug("mean, stdev = %.2f, %.2f" % (mean, stdev))
retCode = RQCExitCodes.JGI_SUCCESS
else:
log.error("- gc hist file not found: %s" % (histFile))
return retCode, mean, stdev
if __name__ == "__main__":
exit(0)
## EOF
| 1.96875 | 2 |
deathgod/directions.py | wmakley/deathgod | 0 | 12793701 | """Constants indicating compass directions used throughout deathgod."""
(NORTH, NORTHEAST, EAST, SOUTHEAST, SOUTH, SOUTHWEST, WEST, NORTHWEST) = list(range(8))
| 1.710938 | 2 |
twitter_crawler/got/manager/TweetGenerator.py | AlexHtZhang/A-billion-dollar-deal | 0 | 12793702 | import json,sys,cookielib
from pyquery import PyQuery
from .. import models
from . import TweetCriteria
from . import TweetHelper
class TweetGenerator(object):
def __init__(self, noTweets = sys.maxint, tweetCriteria = TweetCriteria()):
        assert isinstance(tweetCriteria, TweetCriteria)
        self.noTweets = noTweets
        self.tweetCriteria = tweetCriteria
        # getTweets() below is the batch generator this iterator wraps
        self.tweetIter = TweetGenerator.getTweets(self.tweetCriteria, self.noTweets)
def __iter__(self):
return self.tweetIter
def __next__(self):
return self.tweetIter.next()
@staticmethod
def getTweets(tweetCriteria, noTweets, receiveBuffer=None, bufferLength=100, proxy=None):
'''
param tweetCriteria: input
type tweetCriteria: TweetCriteria
param noTweets: input
type noTweets: int
yields tweets that satisfy the criteria
'''
assert isinstance(noTweets, int)
assert isinstance(tweetCriteria, TweetCriteria)
refreshCursor = ''
results = []
resultsAux = []
cookieJar = cookielib.CookieJar()
if hasattr(tweetCriteria, 'username') and (tweetCriteria.username.startswith("\'") or tweetCriteria.username.startswith("\"")) and (tweetCriteria.username.endswith("\'") or tweetCriteria.username.endswith("\"")):
tweetCriteria.username = tweetCriteria.username[1:-1]
active = True
while active:
json = TweetHelper.getJsonResponse(tweetCriteria, refreshCursor, cookieJar, proxy)
if len(json['items_html'].strip()) == 0:
break
refreshCursor = json['min_position']
scrapedTweets = PyQuery(json['items_html'])
# Remove incomplete tweets withheld by Twitter Guidelines
scrapedTweets.remove('div.withheld-tweet')
tweets = scrapedTweets('div.js-stream-tweet')
if len(tweets) == 0:
break
while len(results) >= noTweets:
tmp = results[:noTweets]
results = results[noTweets:]
tweetCriteria.maxTweets = tweetCriteria.maxTweets - noTweets
yield tmp
for tweetHTML in tweets:
tweet = TweetHelper().parseTweet(tweetHTML)
results.append(tweet)
resultsAux.append(tweet)
if receiveBuffer and len(resultsAux) >= bufferLength:
receiveBuffer(resultsAux)
resultsAux = []
if tweetCriteria.maxTweets > 0 and len(results) >= tweetCriteria.maxTweets:
active = False
break
if receiveBuffer and len(resultsAux) > 0:
receiveBuffer(resultsAux)
while len(results) >= noTweets:
tmp = results[:noTweets]
results = results[noTweets:]
tweetCriteria.maxTweets = tweetCriteria.maxTweets - noTweets
yield tmp
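# Hypothetical usage sketch (TweetCriteria fields are inferred from the code
# above, which reads `username` and `maxTweets`; this is Python 2 code):
#
#   criteria = TweetCriteria()
#   criteria.username = 'someuser'
#   criteria.maxTweets = 200
#   for batch in TweetGenerator.getTweets(criteria, noTweets=50):
#       process(batch)   # each batch is a list of noTweets parsed tweet objects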
| 2.8125 | 3 |
controllers/default.py | orbnauticus/metabase | 0 | 12793703 | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
#########################################################################
def readable_id(id):
return id.replace('_', ' ').title()
class Bijection(dict):
def __init__(self, mapping=None):
super(Bijection, self).__init__()
if mapping:
for key in mapping:
self[key] = mapping[key]
def __setitem__(self, key, value):
super(Bijection, self).__setitem__(key, value)
super(Bijection, self).__setitem__(value, key)
def __delitem__(self, key):
value = self[key]
super(Bijection, self).__delitem__(key)
super(Bijection, self).__delitem__(value)
class a62:
mapping = Bijection({j: i for j, i in zip(
range(62),
'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
)})
base = 62
@classmethod
def encode(cls, value, length):
return ''.join([
cls.mapping[x] for x in (
(value // cls.base**i) % cls.base
for i in range(length - 1, -1, -1)
)
])
@classmethod
def decode(cls, text):
return sum(
cls.mapping[c] * cls.base**(len(text)-i-1)
for i, c in enumerate(text)
)
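# Worked example (follows directly from the mapping above):
#   a62.encode(61, 4)   -> '000z'   (61 is the largest single base-62 digit)
#   a62.encode(62, 4)   -> '0010'   (62 = 1*62 + 0)
#   a62.decode('0010')  -> 62       (decode round-trips encode)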
class ListView:
def __init__(self, table, names, query=None, orderby=None, title=None,
controller=None, function=None):
self.table = table
self.names = names
self.query = query or (self.table.id > 0)
self.orderby = orderby or self.table.id
self.title = title or readable_id(table._id.tablename)
self.controller = controller or request.controller
self.function = function or request.function
def headers(self):
for name in self.names:
yield readable_id(name) if name != 'id' else XML(' ')
def columns(self):
for name in self.names:
yield self.table[name]
def rows(self):
properties = dict(
orderby=self.orderby,
)
return db(self.query).iterselect(*self.columns(), **properties)
def view_url(self, id):
return URL(self.controller, self.function, args=[id])
def edit_url(self, id):
return URL(self.controller, self.function, args=[id, 'edit'],
vars={'next': request.env.path_info})
def delete_url(self, id):
return URL(self.controller, self.function, args=[id, 'delete'],
vars={'next': request.env.path_info})
def new_url(self):
return URL(self.controller, self.function, args=['new'])
class Form:
def __init__(self, table, record=None, default_redirect=None):
self.form = SQLFORM(
table, record,
fields=[field.name for field in table if field.name not in
{'created', 'created_by', 'modified', 'modified_by'}],
)
self.default_redirect = default_redirect
def process(self):
if self.form.process().accepted:
redirect(request.get_vars.next or
self.default_redirect(self.form.vars))
return self.form
class Itemview:
def __init__(self, table, record):
self.table = table
self.record = record
def related(self):
for field in self.table._extra['related']:
table = field.table
names = table._extra['columns']
query = (field == self.record.id)
yield ListView(table, names, query)
class Delegate(dict):
def __init__(self, function, reference, verb=None):
self.function = function
self.table = mb.handlers[self.function]
self.list_orderby = self.table._extra['primary']
self.list_columns = self.table._extra['columns']
dict.__init__(self,
display=self.display,
url=self.url,
function=self.function,
)
record = self.table(reference)
if record and verb is None:
response.view = 'itemview.html'
self['itemview'] = self.build_itemview(record)
elif record is None and verb is None:
response.view = 'listview.html'
self['listview'] = self.build_listview()
elif record is None and verb == 'new' or verb == 'edit':
response.view = 'form.html'
self['form'] = self.build_form(record)
elif record and verb == 'delete':
response.view = 'delete.html'
self['form'] = self.build_delete()
else:
raise HTTP(404)
def display(self, field, row, primary_reference=True):
text = row[field]
link = ''
type, is_reference, table_name = field.type.partition(' ')
if type == 'reference' and text is not None:
table = db[table_name]
reference = text
text = (table._format(table[text]) if callable(table._format)
else table._format % table[text].as_dict())
if 'urls' in table._extra:
link = self.url(table._extra['function'], reference)
elif field.represent is not None:
text = field.represent(text, row)
if text is None and hasattr(field, 'extra') and 'null_value' in field.extra:
text = field.extra['null_value']
if primary_reference:
if hasattr(field, 'extra') and field.extra.get('primary'):
link = self.url(field.table._extra['function'], row.id)
if link:
return A(text, _title=text, _href=link, _class=type)
else:
return SPAN(text, _title=text, _class=type)
def default_redirect(self, vars):
return self.url(self.function, vars.id)
def build_itemview(self, record):
return Itemview(self.table, record)
def build_listview(self):
return ListView(self.table, self.list_columns, orderby=self.list_orderby)
def build_form(self, record):
return Form(self.table, record, default_redirect=self.default_redirect).process()
def build_delete(self):
return
@classmethod
def url(cls, table, reference=None, verb=None):
args = [a62.encode(mb.handlers[table]._extra['index'], 4)]
if reference is not None:
args[0] += a62.encode(reference, 10)
if verb:
args.append(verb)
return URL(r=request, args=args)
@auth.requires_login()
def index():
tables = Bijection({table._extra['index']: key
for key, table in mb.handlers.items()})
first = request.args(0)
if first is None:
redirect(Delegate.url('object'))
if len(first) not in (4, 14):
raise HTTP(404)
function = tables.get(a62.decode(first[:4]), 'not found')
reference = a62.decode(first[4:]) if first[4:] else None
verb = {
None: None,
'e': 'edit',
'd': 'delete',
'n': 'new',
}[request.args(1)]
response.flash = CAT(
P('function: ', function),
P('reference: ', reference),
P('verb: ', verb),
)
return Delegate(function, reference, verb)
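# Worked example of the argument scheme handled above: request.args(0) is a
# 4-character base-62 table index, optionally followed by a 10-character
# base-62 record id, and request.args(1) selects the verb ('e' -> edit,
# 'd' -> delete, 'n' -> new).  A URL built with Delegate.url('object', 42)
# therefore carries args[0] = a62.encode(index_of_object, 4) + a62.encode(42, 10),
# which index() splits back into the handler function and the record reference.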
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
http://..../[app]/default/user/bulk_register
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
also notice there is http://..../[app]/appadmin/manage/auth to allow administrator to manage users
"""
return dict(form=auth())
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
'''
def call():
"""
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
return service()
'''
def debug():
return dict(debug=dict(
user=auth.user,
))
| 2.359375 | 2 |
openstack_dashboard/dashboards/idm/organizations/tests.py | agaldemas/horizon | 0 | 12793704 | # Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from mox import IsA # noqa
from django import http
from django.core.urlresolvers import reverse
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.idm import tests as idm_tests
from openstack_dashboard.dashboards.idm.organizations \
import forms as organizations_forms
INDEX_URL = reverse('horizon:idm:organizations:index')
CREATE_URL = reverse('horizon:idm:organizations:create')
class BaseOrganizationsTests(idm_tests.BaseTestCase):
def _get_project_info(self, project):
project_info = {
"name": unicode(project.name),
"description": unicode(project.description),
"enabled": project.enabled,
"domain": IsA(api.base.APIDictWrapper),
"city": '',
"email": '',
"img":IsA(str),
# '/static/dashboard/img/logos/small/group.png',
"website" : ''
}
return project_info
class IndexTests(BaseOrganizationsTests):
@test.create_stubs({api.keystone: ('tenant_list',)})
def test_index(self):
user_organizations = self.list_organizations()
# Owned organizations mockup
# Only calls the default/first tab, no need to mock the others tab
api.keystone.tenant_list(IsA(http.HttpRequest),
user=self.user.id,
admin=False).AndReturn((user_organizations, False))
self.mox.ReplayAll()
response = self.client.get(INDEX_URL)
self.assertTemplateUsed(response, 'idm/organizations/index.html')
self.assertItemsEqual(response.context['table'].data, user_organizations)
self.assertNoMessages()
@test.create_stubs({api.keystone: ('tenant_list',)})
def test_other_organizations_tab(self):
all_organizations = self.list_organizations()
user_organizations = all_organizations[len(all_organizations)/2:]
other_organizations = all_organizations[:len(all_organizations)/2]
# Other organizations mockup
api.keystone.tenant_list(IsA(http.HttpRequest),
admin=False).AndReturn((all_organizations, False))
api.keystone.tenant_list(IsA(http.HttpRequest),
user=self.user.id,
admin=False).AndReturn((user_organizations, False))
self.mox.ReplayAll()
response = self.client.get(INDEX_URL + '?tab=panel_tabs__organizations_tab')
self.assertTemplateUsed(response, 'idm/organizations/index.html')
self.assertItemsEqual(response.context['table'].data, other_organizations)
self.assertNoMessages()
class DetailTests(BaseOrganizationsTests):
@test.create_stubs({
api.keystone: (
'tenant_get',
'user_list',
)
})
def test_detail(self):
project = self.get_organization()
users = self.users.list()
api.keystone.user_list(IsA(http.HttpRequest),
project=project.id,
filters={'enabled':True}).AndReturn(users)
api.keystone.tenant_get(IsA(http.HttpRequest),
project.id,
admin=True).AndReturn(project)
self.mox.ReplayAll()
url = reverse('horizon:idm:organizations:detail', args=[project.id])
response = self.client.get(url)
self.assertTemplateUsed(response, 'idm/organizations/detail.html')
self.assertItemsEqual(response.context['members_table'].data, users)
self.assertNoMessages()
class CreateTests(BaseOrganizationsTests):
@test.create_stubs({api.keystone: ('tenant_create',)})
def test_create_organization(self):
project = self.get_organization()
project_details = self._get_project_info(project)
api.keystone.tenant_create(IsA(http.HttpRequest),
**project_details).AndReturn(project)
self.mox.ReplayAll()
form_data = {
'method': 'CreateOrganizationForm',
'name': project._info["name"],
'description': project._info["description"],
}
response = self.client.post(CREATE_URL, form_data)
self.assertNoFormErrors(response)
def test_create_organization_required_fields(self):
form_data = {
'method': 'CreateOrganizationForm',
'name': '',
'description': '',
}
response = self.client.post(CREATE_URL, form_data)
self.assertFormError(response, 'form', 'name', ['This field is required.'])
self.assertNoMessages()
class UpdateInfoTests(BaseOrganizationsTests):
@test.create_stubs({
api.keystone: (
'tenant_update',
'tenant_get',
),
})
def test_update_info(self):
project = self.get_organization()
updated_project = {"name": 'Updated organization',
"description": 'updated organization',
"enabled": True,
"city": 'Madrid'}
api.keystone.tenant_get(IsA(http.HttpRequest), project.id).AndReturn(project)
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project).AndReturn(project)
self.mox.ReplayAll()
form_data = {
'method': 'InfoForm',
'orgID':project.id,
'name': updated_project["name"],
'description': updated_project["description"],
'city':updated_project["city"],
}
url = reverse('horizon:idm:organizations:info', args=[project.id])
response = self.client.post(url, form_data)
self.assertNoFormErrors(response)
@unittest.skip('not ready')
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_update_info_required_fields(self):
project = self.get_organization()
api.keystone.tenant_get(IsA(http.HttpRequest), project.id).AndReturn(project)
self.mox.ReplayAll()
form_data = {
'method': 'InfoForm',
'orgID': project.id,
'name': '',
'description': '',
'city': '',
}
url = reverse('horizon:idm:organizations:info', args=[project.id])
response = self.client.post(url, form_data)
# FIXME(garcianavalon) form contains the last form in forms, not the one
# we want to test. The world is tought for multiforms :(
self.assertFormError(response, 'form', 'name', ['This field is required.'])
self.assertFormError(response, 'form', 'description', ['This field is required.'])
self.assertNoMessages()
class UpdateContactTests(BaseOrganizationsTests):
@test.create_stubs({
api.keystone: (
'tenant_update',
'tenant_get',
),
})
def test_update_contact(self):
project = self.get_organization()
updated_project = {
"email": '<EMAIL>',
"website": 'http://www.organization.com/',
}
api.keystone.tenant_get(IsA(http.HttpRequest), project.id).AndReturn(project)
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project).AndReturn(project)
self.mox.ReplayAll()
form_data = {
'method': 'ContactForm',
'orgID':project.id,
'email': updated_project["email"],
'website': updated_project["website"],
}
url = reverse('horizon:idm:organizations:contact', args=[project.id])
response = self.client.post(url, form_data)
self.assertNoFormErrors(response)
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_update_contact_required_fields(self):
project = self.get_organization()
api.keystone.tenant_get(IsA(http.HttpRequest), project.id).AndReturn(project)
self.mox.ReplayAll()
form_data = {
'method': 'ContactForm',
'orgID':project.id,
'email': '',
'website': '',
}
url = reverse('horizon:idm:organizations:contact', args=[project.id])
response = self.client.post(url, form_data)
self.assertNoMessages()
class DeleteTests(BaseOrganizationsTests):
@test.create_stubs({
api.keystone: (
'tenant_delete',
'tenant_get',
),
})
def test_delete_organization(self):
project = self.get_organization()
api.keystone.tenant_get(IsA(http.HttpRequest), project.id).AndReturn(project)
api.keystone.tenant_delete(IsA(http.HttpRequest), project).AndReturn(None)
self.mox.ReplayAll()
form_data = {
'method': 'CancelForm',
'orgID': project.id,
}
url = reverse('horizon:idm:organizations:cancel', args=[project.id])
response = self.client.post(url, form_data)
self.assertNoFormErrors(response)
class UpdateAvatarTests(BaseOrganizationsTests):
# https://docs.djangoproject.com/en/1.7/topics/testing/tools/#django.test.Client.post
# https://code.google.com/p/pymox/wiki/MoxDocumentation
@unittest.skip('not ready')
@test.create_stubs({
api.keystone: (
'tenant_update',
),
})
def test_update_avatar(self):
project = self.get_organization()
mock_file = self.mox.CreateMock(file)
updated_project = {"image": 'image',}
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project).AndReturn(project)
self.mox.ReplayAll()
form_data = {
'method': 'AvatarForm',
'orgID':project.id,
'image': updated_project["image"],
}
url = reverse('horizon:idm:organizations:avatar', args=[project.id])
response = self.client.post(url, form_data)
self.assertNoFormErrors(response)
| 1.742188 | 2 |
PythonDesafios/d049.py | adaatii/Python-Curso-em-Video- | 0 | 12793705 | <filename>PythonDesafios/d049.py
# Redo CHALLENGE 9, showing the multiplication table
# of a number chosen by the user,
# but this time using a for loop.
num = int(input('Digite o numero da tabuada que deseja ver: '))
for i in range(1, 11):
print('{} X {} = {}'.format(i, num, i*num)) | 4.0625 | 4 |
bookwyrm/tests/templatetags/test_notification_page_tags.py | mouse-reeve/fedireads | 270 | 12793706 | <gh_stars>100-1000
""" style fixes and lookups for templates """
from unittest.mock import patch
from django.test import TestCase
from bookwyrm import models
from bookwyrm.templatetags import notification_page_tags
@patch("bookwyrm.activitystreams.add_status_task.delay")
@patch("bookwyrm.activitystreams.remove_status_task.delay")
class NotificationPageTags(TestCase):
"""lotta different things here"""
def setUp(self):
"""create some filler objects"""
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.user = models.User.objects.create_user(
"<EMAIL>",
"<EMAIL>",
"mouseword",
local=True,
localname="mouse",
)
def test_related_status(self, *_):
"""gets the subclass model for a notification status"""
with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"):
status = models.Status.objects.create(content="hi", user=self.user)
notification = models.Notification.objects.create(
user=self.user, notification_type="MENTION", related_status=status
)
result = notification_page_tags.related_status(notification)
self.assertIsInstance(result, models.Status)
| 2.234375 | 2 |
programming_ex/ex-11/POplanning/numbers.py | nnhjy/AI-python | 0 | 12793707 | <gh_stars>0
# State space problems with observability
#
# States
# applActions actions applicabable in the state
# succs successor states of the state w.r.t. action
# preds predecessor states of the state w.r.t. action
# compatible Is the state compatible with an observation?
# Actions
# observations observations possible after the action
### Description
#
# In this problem, there is a randomly picked number as the initial
# state, and the goal is to transform that number to 1 by
# actions -1, +2, and Mod 2, which have different observations
# which test whether the number is a prime or composite, or
# if it is even or odd.
#
### Observations for the weighing problem
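#
# Example (follows from the definitions below): from the number 6 the actions
# Minus1 (leading to 5, observed "prime") and Mod2 (leading to 0, no
# observation) are applicable, while Plus2 is not because 6 > 5.  The single
# goal state is the number 1, and the initial belief state is {0,...,5}.
#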
class OBSprime:
def __init__(self):
self.dummy = 1
def __str__(self):
return "prime"
class OBScomposite:
def __init__(self):
self.dummy = 1
def __str__(self):
return "composite"
class OBSodd:
def __init__(self):
self.dummy = 1
def __str__(self):
return "odd"
class OBSeven:
def __init__(self):
self.dummy = 1
def __str__(self):
return "even"
class OBSnothingN:
def __init__(self):
self.dummy = 1
def __str__(self):
return "NOOBS"
### Actions for the numbers problem
class NumMinus1:
def __init__(self):
self.dummy = 0
def __str__(self):
return "Minus1"
def __eq__(self,other):
return isinstance(other,NumMinus1)
def __hash__(self):
return 0
def observations(self):
return [OBSprime(),OBScomposite()]
class NumPlus2:
def __init__(self):
self.dummy = 0
def __str__(self):
return "Plus2"
def __eq__(self,other):
return isinstance(other,NumPlus2)
def __hash__(self):
return 1
def observations(self):
return [OBSodd(),OBSeven()]
class NumMod2:
def __init__(self):
self.dummy = 0
def __str__(self):
return "Mod2"
def __eq__(self,other):
return isinstance(other,NumMod2)
def __hash__(self):
return 1
def observations(self):
return [OBSnothingN()]
### States for the numbers problem
class NumState:
def __init__(self,n):
self.n = n
def __hash__(self):
return self.n
def __eq__(self,other):
return isinstance(other,NumState) and self.n == other.n
def __str__(self):
return str(self.n)
def clonestate(self):
return NumState(self.n)
    # Which actions are applicable in a state?
    # Each action is applicable only while the current number is within the
    # bounds that keep the successor inside the 0..7 range (see below).
def applActions(self):
actions1 = []
actions2 = []
actions3 = []
if self.n >= 1 and self.n <= 7:
actions1 = [ NumMinus1() ]
if self.n >= 0 and self.n <= 5:
actions2 = [ NumPlus2() ]
if self.n >= 1 and self.n <= 7:
actions3 = [ NumMod2() ]
return actions1 + actions2 + actions3
# Successors of a state w.r.t. an action
def succs(self,action):
if isinstance(action,NumMod2):
s = self.clonestate()
s.n = self.n % 2
return {s}
if isinstance(action,NumMinus1):
s = self.clonestate()
s.n = self.n - 1
return {s}
if isinstance(action,NumPlus2):
s = self.clonestate()
s.n = self.n + 2
return {s}
raise Exception("Unrecognized action " + str(action))
# Predecessors of a state w.r.t. an action
def preds(self,action):
if isinstance(action,NumMod2):
if self.n == 0:
return set([ NumState(i) for i in [0,2,4,6]])
else:
return set([ NumState(i) for i in [1,3,5,7]])
if isinstance(action,NumMinus1):
if self.n > 6:
return set()
s = self.clonestate()
s.n = self.n + 1
return {s}
if isinstance(action,NumPlus2):
if self.n < 2:
return set()
s = self.clonestate()
s.n = self.n - 2
return {s}
raise Exception("Unrecognized action " + str(action))
# Is the state compatible with the observation?
def compatible(self,observation):
if isinstance(observation,OBSeven):
return (self.n % 2 == 0)
if isinstance(observation,OBSodd):
return (self.n % 2 == 1)
if isinstance(observation,OBSprime):
return (self.n in [0,1,2,3,5,7])
if isinstance(observation,OBScomposite):
return (self.n in [4,6])
if isinstance(observation,OBSnothingN):
return True
raise Exception("Unrecognized observation " + str(observation))
import itertools
# There is one initial state for each candidate hidden number:
# initially the number can be any value in 0..5.
def NUMinit():
return { NumState(i) for i in range(0,6) }
# Goal states are all permutations of the packages, but
# the chosen package must be the one with weight N.
def NUMgoal():
return { NumState(1) }
# Generate the numbers problem instance
def NUMproblem():
actions = [ NumPlus2(), NumMinus1(), NumMod2() ]
return (NUMinit(),NUMgoal(),actions)
# Testing the algorithm with the numbers problem.
from POplanFwd import POsolver
import sys
if __name__ == "__main__":
instance = NUMproblem()
inits,goals,actions = instance
for s in inits:
print(str(s))
print("Number of initial states is " + str(len(inits)))
plan = POsolver(instance)
if plan == None:
print("No plan found")
else:
# Show plan's text representation (a list of strings).
for s in plan.plan2str():
print(s)
# Show plan statistics
print("Plan found: size " + str(plan.plansize()) + " depth " + str(plan.plandepth()))
# Test that execution from every initial state reaches goals.
for state in inits:
plan.validate(state,goals)
| 3.375 | 3 |
boxm/update_downtown.py | mirestrepo/voxels-at-lems | 2 | 12793708 | <reponame>mirestrepo/voxels-at-lems
import boxm_batch;
import os;
import time;
boxm_batch.register_processes();
boxm_batch.register_datatypes();
#time.sleep(10);
class dbvalue:
def __init__(self, index, type):
self.id = index # unsigned integer
self.type = type # string
# Capitol
model_dir = "/Users/isa/Experiments/DowntownBOXM_12_12_4";
model_imgs_dir = "/Users/isa/Experiments/DowntownBOXM_12_12_4/imgs"
around_imgs_dir = "/Users/isa/Experiments/DowntownBOXM_12_12_4/imgs360_%03d"
if not os.path.isdir( model_imgs_dir + "/"):
os.mkdir( model_imgs_dir + "/");
image_fnames = "/Volumes/vision/video/dec/Downtown/video/frame_%05d.png";
camera_fnames = "/Volumes/vision/video/dec/Downtown/cameras_KRT/camera_%05d.txt";
expected_fname = model_imgs_dir + "/expected_%05d.tiff";
image_id_fname = model_imgs_dir + "/schedule.txt";
expected_fname_no_dir = "/expected_%05d.tiff"
print("Creating a Scene");
boxm_batch.init_process("boxmCreateSceneProcess");
boxm_batch.set_input_string(0, model_dir +"/downtown_scene.xml");
boxm_batch.run_process();
(scene_id, scene_type) = boxm_batch.commit_output(0);
scene = dbvalue(scene_id, scene_type);
print("Loading Virtual Camera");
boxm_batch.init_process("vpglLoadPerspectiveCameraProcess");
boxm_batch.set_input_string(0,camera_fnames % 40);
boxm_batch.run_process();
(id,type) = boxm_batch.commit_output(0);
vcam = dbvalue(id,type);
nframes =181;
import random;
schedule = [i for i in range(0,nframes)];
random.shuffle(schedule);
#schedule = [176, 158, 146, 174, 9, 96, 89, 40, 152, 41, 58, 51, 175, 125, 155, 42, 10, 75, 173, 34, 145, 135, 163, 6, 11, 104, 3, 121, 52, 102, 46, 60, 117, 93, 126, 67, 16, 166, 13, 107, 164, 38, 78, 33, 56, 100, 24, 43, 120, 27, 113, 84, 151, 165, 147, 71, 131, 109, 124, 141, 105, 21, 129, 112, 137, 87, 26, 128, 180, 68, 142, 47, 48, 77, 150, 50, 91, 161, 144, 83, 64, 140, 81, 36, 167, 72, 45, 98, 88, 85, 156, 39, 12, 103, 2, 55, 61, 148, 80, 157, 136, 70, 23, 92, 8, 73, 172, 111, 116, 177, 29, 178, 49, 14, 138, 1, 115, 94, 22, 20, 66, 35, 17, 160, 154, 132, 99, 31, 18, 28, 57, 133, 54, 32, 127, 171, 76, 79, 168, 122, 143, 90, 149, 62, 108, 170, 37, 101, 179, 82, 106, 114, 5, 110, 169, 97, 44, 25, 118, 95, 7, 19, 162, 119, 134, 159, 15, 59, 63, 123, 130, 65, 69, 86, 139, 0, 53, 153, 74, 4, 30];
bad_frames = [34, 49, 63, 76, 113, 118, 127, 148]
print "schedule is ", schedule;
# write schedule file
fd = open(image_id_fname,"w");
print >>fd, len(schedule);
print >>fd, schedule;
fd.close()
print schedule;
for x in range(0,len(schedule),1):
#for x in range(63,64,1):
i = schedule[x];
is_good_frame = 1;
for b in range(0, len(bad_frames),1):
if (bad_frames[b]== i):
is_good_frame = 0;
print ("Skiping frame: "); print(i);
break;
if(is_good_frame):
print("Loading Camera");
boxm_batch.init_process("vpglLoadPerspectiveCameraProcess");
boxm_batch.set_input_string(0,camera_fnames % i);
status = boxm_batch.run_process();
(id,type) = boxm_batch.commit_output(0);
cam = dbvalue(id,type);
print("Loading Image");
boxm_batch.init_process("vilLoadImageViewProcess");
boxm_batch.set_input_string(0,image_fnames % i);
status = status & boxm_batch.run_process();
(id,type) = boxm_batch.commit_output(0);
image = dbvalue(id,type);
if(status):
print("Updating Scene");
boxm_batch.init_process("boxmUpdateRTProcess");
boxm_batch.set_input_from_db(0,image);
boxm_batch.set_input_from_db(1,cam);
boxm_batch.set_input_from_db(2,scene);
boxm_batch.set_input_unsigned(3,0);
boxm_batch.set_input_bool(4, 0);
boxm_batch.run_process();
#refine only for first 140 images
if(x < 140 ):
print("Refine Scene");
boxm_batch.init_process("boxmRefineSceneProcess");
boxm_batch.set_input_from_db(0,scene);
boxm_batch.set_input_float(1,0.2);
boxm_batch.set_input_bool(2,1);
boxm_batch.run_process();
# Generate Expected Image
print("Generating Expected Image");
boxm_batch.init_process("boxmRenderExpectedRTProcess");
boxm_batch.set_input_from_db(0,scene);
boxm_batch.set_input_from_db(1,vcam);
boxm_batch.set_input_unsigned(2,1280);
boxm_batch.set_input_unsigned(3,720);
boxm_batch.set_input_bool(4,0);
boxm_batch.run_process();
(id,type) = boxm_batch.commit_output(0);
expected = dbvalue(id,type);
(id,type) = boxm_batch.commit_output(1);
mask = dbvalue(id,type);
print("saving expected image");
boxm_batch.init_process("vilSaveImageViewProcess");
boxm_batch.set_input_from_db(0,expected);
boxm_batch.set_input_string(1,expected_fname % i);
boxm_batch.run_process();
if(((x+1) % 50 == 0) or (x == 180)):
temp_dir = around_imgs_dir % x;
os.mkdir( temp_dir + "/");
for j in range(0,nframes,8):
print("Loading Camera");
boxm_batch.init_process("vpglLoadPerspectiveCameraProcess");
boxm_batch.set_input_string(0,camera_fnames % j);
status = boxm_batch.run_process();
(id,type) = boxm_batch.commit_output(0);
cam = dbvalue(id,type);
# Generate Expected Image
print("Generating Expected Image");
boxm_batch.init_process("boxmRenderExpectedRTProcess");
boxm_batch.set_input_from_db(0,scene);
boxm_batch.set_input_from_db(1,cam);
boxm_batch.set_input_unsigned(2,1280);
boxm_batch.set_input_unsigned(3,720);
boxm_batch.set_input_bool(4,0);
boxm_batch.run_process();
(id,type) = boxm_batch.commit_output(0);
expected = dbvalue(id,type);
(id,type) = boxm_batch.commit_output(1);
mask = dbvalue(id,type);
image_name = expected_fname_no_dir % j
print("saving expected image");
boxm_batch.init_process("vilSaveImageViewProcess");
boxm_batch.set_input_from_db(0,expected);
boxm_batch.set_input_string(1, temp_dir + image_name);
boxm_batch.run_process();
#print("Save Scene");
#boxm_batch.init_process("boxmSaveOccupancyRawProcess");
#boxm_batch.set_input_from_db(0,scene);
#boxm_batch.set_input_string(1,model_dir + "/sample_scene.raw");
#boxm_batch.set_input_unsigned(2,0);
#boxm_batch.set_input_unsigned(3,0);
#boxm_batch.run_process();
print("Save Scene");
boxm_batch.init_process("boxmSaveOccupancyRawProcess");
boxm_batch.set_input_from_db(0,scene);
boxm_batch.set_input_string(1,model_dir + "/all_sample_scene.raw");
boxm_batch.set_input_unsigned(2,0);
boxm_batch.set_input_unsigned(3,1);
boxm_batch.run_process(); | 2.1875 | 2 |
sftpsync/command_line.py | marccarre/sftpsync | 1 | 12793709 | import sys
from sys import argv, exit
import os
from os import linesep
from getopt import getopt, GetoptError
import re
import socks
from getpass import getuser
ERROR_ILLEGAL_ARGUMENTS = 2
def usage(error_message=None):
if error_message:
sys.stderr.write('ERROR: ' + error_message + linesep)
sys.stdout.write(linesep.join([
'Usage:',
' sftpsync.py [OPTION]... SOURCE DESTINATION',
'Pull:',
' sftpsync.py [OPTION]... [s]ftp://[user[:password]@]host[:port][/path] /path/to/local/copy',
'Push:',
' sftpsync.py [OPTION]... /path/to/local/copy [s]ftp://[user[:password]@]host[:port][/path]',
'',
'Defaults:',
' user: anonymous',
' password: <PASSWORD>',
' port: 22',
' path: /',
'',
'Options:',
'-f/--force Force the synchronization regardless of files\' presence or timestamps.',
'-F config_file Specifies an alternative per-user configuration file.',
' If a configuration file is given on the command line, the system-wide configuration file (/etc/ssh/ssh_config) will be ignored.',
' The default for the per-user configuration file is ~/.ssh/config.',
'-h/--help Prints this!',
'-i/--identity identity_file',
' Selects the file from which the identity (private key) for public key authentication is read.',
'-o ssh_option',
' Can be used to pass options to ssh in the format used in ssh_config(5). This is useful for specifying options for which there is no separate sftpsync command-line flag.',
' For full details of the options listed below, and their possible values, see ssh_config(5).',
' ProxyCommand',
'-p/--preserve: Preserves modification times, access times, and modes from the original file.',
'--proxy [user[:password]@]host[:port]',
' SOCKS proxy to use. If not provided, port will be defaulted to 1080.',
'--proxy-version SOCKS4|SOCKS5',
' Version of the SOCKS protocol to use. Default is SOCKS5.',
'-q/--quiet: Quiet mode: disables the progress meter as well as warning and diagnostic messages from ssh(1).',
'-r/--recursive: Recursively synchronize entire directories.',
'-v/--verbose: Verbose mode. Causes sftpsync to print debugging messages about their progress. This is helpful in debugging connection, authentication, and configuration problems.',
''
]))
def configure(argv):
try:
# Default configuration:
config = {
'force': False,
'preserve': False,
'quiet': False,
'recursive': False,
'verbose': False,
'private_key': None,
'proxy': None,
'proxy_version': socks.SOCKS5,
'ssh_config' : '~/.ssh/config',
'ssh_options': {},
}
opts, args = getopt(argv, 'fF:hi:o:pqrv', ['force', 'help', 'identity=', 'preserve', 'proxy=', 'proxy-version=', 'quiet', 'recursive', 'verbose'])
for opt, value in opts:
if opt in ('-h', '--help'):
usage()
exit()
if opt in ('-f', '--force'):
config['force'] = True
if opt in ('-p', '--preserve'):
config['preserve'] = True
if opt in ('-q', '--quiet'):
config['quiet'] = True
if opt in ('-r', '--recursive'):
config['recursive'] = True
if opt in ('-v', '--verbose'):
config['verbose'] = True
if opt in ('-i', '--identity'):
config['private_key'] = _validate_private_key_path(value)
if opt == '--proxy':
config['proxy'] = _validate_and_parse_socks_proxy(value)
if opt == '--proxy-version':
config['proxy_version'] = _validate_and_parse_socks_proxy_version(value)
if opt == '-F':
config['ssh_config'] = _validate_ssh_config_path(value)
if opt == '-o':
k, v = _validate_ssh_option(value)
config['ssh_options'][k] = v
if config['verbose'] and config['quiet']:
raise ValueError('Please provide either -q/--quiet OR -v/--verbose, but NOT both at the same time.')
if len(args) != 2:
raise ValueError('Please provide a source and a destination. Expected 2 arguments but got %s: %s.' % (len(args), args))
(source, destination) = args
config['source'] = _validate_source(source)
config['destination'] = _validate_destination(destination)
return config
except GetoptError as e:
usage(str(e))
exit(ERROR_ILLEGAL_ARGUMENTS)
except ValueError as e:
usage(str(e))
exit(ERROR_ILLEGAL_ARGUMENTS)
def _validate_private_key_path(path):
if not path:
raise ValueError('Invalid path: "%s". Please provide a valid path to your private key.' % path)
if not os.path.exists(path):
raise ValueError('Invalid path: "%s". Provided path does NOT exist. Please provide a valid path to your private key.' % path)
return path
def _validate_ssh_config_path(path):
if not path:
raise ValueError('Invalid path: "%s". Please provide a valid path to your SSH configuration.' % path)
if not os.path.exists(path):
raise ValueError('Invalid path: "%s". Provided path does NOT exist. Please provide a valid path to your SSH configuration.' % path)
return path
def _validate_ssh_option(option, white_list=['ProxyCommand']):
key_value = option.split('=', 1) if '=' in option else option.split(' ', 1)
if not key_value or not len(key_value) == 2:
raise ValueError('Invalid SSH option: "%s".' % option)
key, value = key_value
if not key or not value:
raise ValueError('Invalid SSH option: "%s".' % option)
if key not in white_list:
raise ValueError('Unsupported SSH option: "%s". Only the following SSH options are currently supported: %s.' % (key, ', '.join(white_list)))
return key, value
_USER = 'user'
_PASS = '<PASSWORD>'
_HOST = 'host'
_PORT = 'port'
_PATH = 'path'
_DRIVE = 'drive'
_FILEPATH = 'filepath'
_PATTERNS = {
_USER: r'.+?',
_PASS: r'.+?',
_HOST: r'[\w\-\.]{3,}?',
_PORT: r'|\d{1,4}|6553[0-5]|655[0-2]\d|65[0-4]\d{2}|6[0-4]\d{3}|[0-5]\d{4}',
_PATH: r'/.*',
_DRIVE: r'[a-zA-Z]{1}:',
_FILEPATH: r'.*?',
}
def _group(name, patterns=_PATTERNS):
return '(?P<%s>%s)' % (name, patterns[name])
_PROXY_PATTERN = '^(%s(:%s)?@)?%s(:%s)?$' % (_group(_USER), _group(_PASS), _group(_HOST), _group(_PORT))
_SFTP_PATTERN = '^s?ftp://(%s(:%s)?@)?%s(:%s)?%s?$' % (_group(_USER), _group(_PASS), _group(_HOST), _group(_PORT), _group(_PATH))
_PATH_PATTERN = '^%s?%s$' % (_group(_DRIVE), _group(_FILEPATH))
def _validate_and_parse_socks_proxy(proxy):
return _validate_and_parse_connection_string(proxy, _PROXY_PATTERN, 'Invalid proxy: "%s".' % proxy)
def _validate_and_parse_sftp(sftp):
return _validate_and_parse_connection_string(sftp, _SFTP_PATTERN, 'Invalid SFTP connection details: "%s".' % sftp)
def _validate_and_parse_connection_string(connection_string, pattern, error_message):
'''
Parses the provided connection string against the provided pattern into a dictionary, if there is a match,
or raises exception if no match.
'''
match = re.search(pattern, connection_string)
if not match:
raise ValueError(error_message)
return dict((key, value) for (key, value) in match.groupdict().items() if value)
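# Added illustration (hypothetical values): parsing 'proxy.example.com:1080'
# against _PROXY_PATTERN yields {'host': 'proxy.example.com', 'port': '1080'};
# the optional user/password groups appear in the dict only when present.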
def _validate_and_parse_socks_proxy_version(socks_version, white_list=['SOCKS4', 'SOCKS5']):
if socks_version not in white_list:
raise ValueError('Invalid SOCKS proxy version: "%s". Please choose one of the following values: { %s }.' % (socks_version, ', '.join(white_list)))
return eval('socks.%s' % socks_version)
def _validate_source(source):
if _is_sftp(source):
return _validate_and_parse_sftp(source)
if _is_path(source):
return _validate_is_readable_path(source)
raise ValueError('Invalid source. Please provide either SFTP connection details or a path to a local, existing and readable folder: %s.' % source)
def _validate_destination(destination):
if _is_sftp(destination):
return _validate_and_parse_sftp(destination)
if _is_path(destination):
return _validate_is_writable_path(destination)
raise ValueError('Invalid destination. Please provide either SFTP connection details or a path to a local, existing and writable folder: %s.' % destination)
def _is_sftp(sftp):
return re.search(_SFTP_PATTERN, sftp)
def _is_path(path):
return re.search(_PATH_PATTERN, path)
def _validate_is_readable_path(path):
if not os.path.exists(path):
raise ValueError('Invalid path. "%s" does NOT exist.' % path)
if not os.access(os.path.abspath(path), os.R_OK):
raise ValueError('Invalid path. "%s" exists but user "%s" does NOT have read access.' % (path, getuser()))
return path
def _validate_is_writable_path(path):
if not os.path.exists(path):
raise ValueError('Invalid path. "%s" does NOT exist.' % path)
if not os.access(os.path.abspath(path), os.W_OK):
raise ValueError('Invalid path. "%s" exists but user "%s" does NOT have write access.' % (path, getuser()))
return path
| 2.53125 | 3 |
test_flexipy/test_faktura.py | Vitexus/flexipy | 3 | 12793710 | # -*- coding: utf-8 -*-
from flexipy import Faktura
from flexipy import config
import requests
import json
class TestFaktura:
def setup(self):
self.conf = config.TestingConfig()
server_settings = self.conf.get_server_config()
self.username = str(server_settings['username'])
self.password = str(server_settings['password'])
self.url = str(server_settings['url'])
self.faktura = Faktura(self.conf)
def test_get_all_vydane_faktury(self):
r = requests.get(self.url+'faktura-vydana.json' ,auth=(self.username,self.password), verify=False)
d = r.json()
if len(d['winstrom']['faktura-vydana']):
list_of_invoices_expected = d['winstrom']['faktura-vydana'][0]
else:
list_of_invoices_expected = d['winstrom']['faktura-vydana']
list_of_invoices_actual = self.faktura.get_all_vydane_faktury()
assert list_of_invoices_expected == list_of_invoices_actual
def test_get_all_prijate_faktury(self):
r = requests.get(self.url+'faktura-prijata.json' ,auth=(self.username,self.password), verify=False)
d = r.json()
if(len(d['winstrom']['faktura-prijata']) == 1):
list_of_invoices_expected = d['winstrom']['faktura-prijata'][0]
else:
list_of_invoices_expected = d['winstrom']['faktura-prijata']
list_of_invoices_actual = self.faktura.get_all_prijate_faktury()
assert list_of_invoices_expected == list_of_invoices_actual
def test_create_vydana_faktura(self):
expected_data = {'kod':'flex11','typDokl':'code:FAKTURA','firma':'code:201','popis':'Flexipy test invoice', 'sumDphZakl':'0.0','bezPolozek':'true', 'varSym':'11235484','zdrojProSkl':'false'}
dalsi_param = {'popis':'Flexipy test invoice','firma':'code:201'}
result = self.faktura.create_vydana_faktura(kod='flex11', var_sym='11235484', datum_vyst='2013-02-28', zdroj_pro_sklad=False, typ_dokl=self.conf.get_typy_faktury_vydane()[0], dalsi_param=dalsi_param)
assert result[0] == True #expected True
id = result[1]
actualData = self.faktura.get_vydana_faktura(id, detail='full')
assert actualData['kod'].lower() == expected_data['kod'].lower()
assert actualData['typDokl'] == expected_data['typDokl']
assert actualData['firma'] == expected_data['firma']
assert actualData['popis'] == expected_data['popis']
assert actualData['sumDphZakl'] == expected_data['sumDphZakl']
        # clean up after the test
self.faktura.delete_vydana_faktura(id)
def test_create_vydana_faktura_polozky(self):
polozky = [{'typPolozkyK':self.conf.get_typ_polozky_vydane()[0],'zdrojProSkl':False,'nazev':'vypujceni auta','ucetni':True,'cenaMj':'4815.0'}]
expected_data = {'kod':'flex12','typDokl':'code:FAKTURA','firma':'code:201','popis':'Flexipy test invoice',
'varSym':'11235484','zdrojProSkl':'false','polozkyFaktury':polozky}
expected_polozky = [{'typPolozkyK':'typPolozky.obecny','zdrojProSkl':'false','nazev':'vypujceni auta','ucetni':'true','cenaMj':'4815.0'}]
dalsi_param = {'popis':'Flexipy test invoice','firma':'code:201','typUcOp':u'code:TRŽBA SLUŽBY'}
result = self.faktura.create_vydana_faktura(kod='flex12', var_sym='11235484', datum_vyst='2013-02-28',
zdroj_pro_sklad=False, typ_dokl=self.conf.get_typy_faktury_vydane()[0], dalsi_param=dalsi_param, polozky_faktury=polozky)
assert result[0] == True #expected True
id = result[1]
actualData = self.faktura.get_vydana_faktura(id, detail='full')
assert actualData['kod'].lower() == expected_data['kod'].lower()
assert actualData['typDokl'] == expected_data['typDokl']
assert actualData['firma'] == expected_data['firma']
assert actualData['popis'] == expected_data['popis']
        # the number of items must match
assert len(actualData['polozkyFaktury']) == len(expected_polozky)
actual_polozky = actualData['polozkyFaktury'][0]
assert actual_polozky['typPolozkyK'] == expected_polozky[0]['typPolozkyK']
assert actual_polozky['nazev'] == expected_polozky[0]['nazev']
assert actual_polozky['cenaMj'] == expected_polozky[0]['cenaMj']
        # clean up after the test
self.faktura.delete_vydana_faktura(id) | 2.609375 | 3 |
note-input/main.py | hyper-neutrino/hack-the-north-2017 | 0 | 12793711 | <filename>note-input/main.py
import subprocess
import sys
import re
import json
import os
import requests
requests.get("http://localhost:5000/recorder/set_killee_process?pid=%s" % os.getpid())
try:
port = int(sys.argv[-1])
except:
print("Usage:\npython3 main.py <port>")
sys.exit(-1)
process = subprocess.Popen(["aseqdump", "-p", str(port)], stdout=subprocess.PIPE)
def parse_line(line):
# line = re.sub(" +", "^", line.decode().strip())
parts = re.split(" +", line.decode().strip())
evt = parts[1] # Such as `Note on`, etc
if len(parts) >= 3:
properties = parts[2].split(", ")[1:]
else:
properties = []
properties_dict = dict()
for prop in properties:
properties_dict[prop[:prop.index(" ")]] = prop[prop.index(" ") + 1:]
if "velocity" in properties_dict:
properties_dict["velocity"] = int(properties_dict["velocity"]) # Hack
return dict(type=evt, pitch=int(properties_dict["note"]), **properties_dict)
process.stdout.readline()
process.stdout.readline()
# print(json.dumps(pid=os.getpid(), type="pid_headsup"))
for line in process.stdout:
if "active sensing" not in line.decode().lower() and "clock" not in line.decode().lower():
print(json.dumps(parse_line(line)))
sys.stdout.flush()
| 2.46875 | 2 |
Chinchira19041.py | fujisaki-tamago/anime2021 | 2 | 12793712 | from anime2021.anime import *
chinchira_url = 'https://1.bp.blogspot.com/-OBrXImM0Sd4/X5OcFkxjEnI/AAAAAAABb40/HSDcWu_nYSUbAmRlWro9bHF6pkZJEyFngCNcBGAsYHQ/s789/animal_mawashi_guruma_chinchira.png'
class RollingChinchira(AImage):
def __init__(self, width=100, height=100, cx=None, cy=None, image=chinchira_url, scale=1.0):
self.width = width
self.height = height
self.scale = scale
if image.startswith('http'):
self.pic = Image.open(io.BytesIO(requests.get(image).content))
else:
self.pic = Image.open(image)
def render(self, canvas: ACanvas, tick: int):
ox, oy, w, h = self.bounds()
pic = self.pic.resize((int(w), int(h)))
r = min(self.width, self.height)/2
for i in range(180):
slope = math.pi * 2 * 10 * (tick/180)
ox = self.cx + r * math.cos(i * slope)
oy = self.cy + r * math.sin(i * slope)
canvas.image.paste(pic, (int(ox), int(oy)), pic)
def chinchira_shape(shape):
studio = AStudio(400,300)
studio.append(shape)
frames = 50
for t in range(frames):
x = 400 - 8*t
y = 150
shape.cx = x
shape.cy = y
studio.render()
return studio.create_anime(delay=50)
| 2.890625 | 3 |
eland/tests/field_mappings/test_metric_source_fields_pytest.py | redNixon/eland | 0 | 12793713 | <reponame>redNixon/eland
# Copyright 2019 Elasticsearch BV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File called _pytest for PyCharm compatibility
import numpy as np
import eland as ed
from eland.tests import ES_TEST_CLIENT, ECOMMERCE_INDEX_NAME, FLIGHTS_INDEX_NAME
from eland.tests.common import TestData
class TestMetricSourceFields(TestData):
def test_flights_all_metric_source_fields(self):
ed_field_mappings = ed.FieldMappings(
client=ed.Client(ES_TEST_CLIENT), index_pattern=FLIGHTS_INDEX_NAME
)
pd_flights = self.pd_flights()
ed_dtypes, ed_fields, es_date_formats = ed_field_mappings.metric_source_fields()
pd_metric = pd_flights.select_dtypes(include=np.number)
assert pd_metric.dtypes.to_list() == ed_dtypes
assert pd_metric.columns.to_list() == ed_fields
assert len(es_date_formats) == len(ed_dtypes)
assert set(es_date_formats) == {None}
def test_flights_all_metric_source_fields_and_bool(self):
ed_field_mappings = ed.FieldMappings(
client=ed.Client(ES_TEST_CLIENT), index_pattern=FLIGHTS_INDEX_NAME
)
pd_flights = self.pd_flights()
ed_dtypes, ed_fields, es_date_formats = ed_field_mappings.metric_source_fields(
include_bool=True
)
pd_metric = pd_flights.select_dtypes(include=[np.number, "bool"])
assert pd_metric.dtypes.to_list() == ed_dtypes
assert pd_metric.columns.to_list() == ed_fields
assert len(es_date_formats) == len(ed_dtypes)
assert set(es_date_formats) == {None}
def test_flights_all_metric_source_fields_bool_and_timestamp(self):
ed_field_mappings = ed.FieldMappings(
client=ed.Client(ES_TEST_CLIENT), index_pattern=FLIGHTS_INDEX_NAME
)
pd_flights = self.pd_flights()
ed_dtypes, ed_fields, es_date_formats = ed_field_mappings.metric_source_fields(
include_bool=True, include_timestamp=True
)
pd_metric = pd_flights.select_dtypes(include=[np.number, "bool", "datetime"])
assert pd_metric.dtypes.to_list() == ed_dtypes
assert pd_metric.columns.to_list() == ed_fields
assert len(es_date_formats) == len(ed_dtypes)
assert set(es_date_formats) == set(
{"strict_date_hour_minute_second", None}
) # TODO - test position of date_format
def test_ecommerce_selected_non_metric_source_fields(self):
field_names = [
"category",
"currency",
"customer_birth_date",
"customer_first_name",
"user",
]
"""
        Note: none of these are metric
category object
currency object
customer_birth_date datetime64[ns]
customer_first_name object
user object
"""
ed_field_mappings = ed.FieldMappings(
client=ed.Client(ES_TEST_CLIENT),
index_pattern=ECOMMERCE_INDEX_NAME,
display_names=field_names,
)
pd_ecommerce = self.pd_ecommerce()[field_names]
ed_dtypes, ed_fields, es_date_formats = ed_field_mappings.metric_source_fields()
pd_metric = pd_ecommerce.select_dtypes(include=np.number)
assert pd_metric.dtypes.to_list() == ed_dtypes
assert pd_metric.columns.to_list() == ed_fields
assert len(es_date_formats) == len(ed_dtypes)
assert set(es_date_formats) == set()
def test_ecommerce_selected_mixed_metric_source_fields(self):
field_names = [
"category",
"currency",
"customer_birth_date",
"customer_first_name",
"total_quantity",
"user",
]
"""
Note: one is metric
category object
currency object
customer_birth_date datetime64[ns]
customer_first_name object
total_quantity int64
user object
"""
ed_field_mappings = ed.FieldMappings(
client=ed.Client(ES_TEST_CLIENT),
index_pattern=ECOMMERCE_INDEX_NAME,
display_names=field_names,
)
pd_ecommerce = self.pd_ecommerce()[field_names]
ed_dtypes, ed_fields, es_date_formats = ed_field_mappings.metric_source_fields()
pd_metric = pd_ecommerce.select_dtypes(include=np.number)
assert len(es_date_formats) == len(ed_dtypes)
assert set(es_date_formats) == {None}
assert pd_metric.dtypes.to_list() == ed_dtypes
assert pd_metric.columns.to_list() == ed_fields
def test_ecommerce_selected_all_metric_source_fields(self):
field_names = ["total_quantity", "taxful_total_price", "taxless_total_price"]
"""
Note: all are metric
total_quantity int64
taxful_total_price float64
taxless_total_price float64
"""
ed_field_mappings = ed.FieldMappings(
client=ed.Client(ES_TEST_CLIENT),
index_pattern=ECOMMERCE_INDEX_NAME,
display_names=field_names,
)
pd_ecommerce = self.pd_ecommerce()[field_names]
ed_dtypes, ed_fields, es_date_formats = ed_field_mappings.metric_source_fields()
pd_metric = pd_ecommerce.select_dtypes(include=np.number)
assert pd_metric.dtypes.to_list() == ed_dtypes
assert pd_metric.columns.to_list() == ed_fields
assert len(es_date_formats) == len(ed_dtypes)
assert set(es_date_formats) == {None}
| 2.125 | 2 |
runfiles/internal/codegen.bzl | fmeum/rules_runfiles | 7 | 12793714 | # Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(":common.bzl", "escape")
def generate_nested_structure(runfile_structs, begin_group, end_group, emit, indent_per_level = 0):
runfiles_per_path = {}
for runfile in runfile_structs:
segments = [runfile.repo] + (runfile.pkg.split("/") if runfile.pkg else [])
segments = [escape(s) for s in segments]
segments = tuple(segments)
runfiles_per_path.setdefault(segments, []).append(runfile)
paths = sorted(runfiles_per_path.keys())
# Sentinel values to close the last group.
paths.append(tuple())
runfiles_per_path[tuple()] = []
# Sentinel value to open the first group.
previous_segments = []
code = []
for segments in paths:
mismatch_pos = _mismatch(previous_segments, segments)
if mismatch_pos != -1:
for pos in range(len(previous_segments) - 1, mismatch_pos - 1, -1):
code.append(_indent(end_group(previous_segments[pos]), indent_per_level * pos))
for pos in range(mismatch_pos, len(segments)):
code.append(_indent(begin_group(segments[pos]), indent_per_level * pos))
definitions = []
for runfile in runfiles_per_path[segments]:
definitions.append(_indent(emit(runfile), indent_per_level * len(segments)))
if definitions:
code.append("\n\n".join(definitions))
previous_segments = segments
return "\n".join(code)
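# Added illustration: for runfiles in packages "repo/pkg_a" and "repo/pkg_b",
# the sorted walk opens the "repo" group once, opens "pkg_a" and closes it when
# "pkg_b" begins, and the empty sentinel path appended above finally closes
# "pkg_b" and then "repo".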
def _mismatch(l1, l2):
min_length = min(len(l1), len(l2))
for i in range(min_length):
if l1[i] != l2[i]:
return i
if len(l1) == len(l2):
return -1
else:
return min_length
def _indent(s, level):
if level == 0:
return s
indent = level * " "
return "\n".join([indent + line for line in s.split("\n")])
| 2.109375 | 2 |
workplace_extractor/Nodes/__init__.py | denisduarte/workplace_extractor | 0 | 12793715 | <reponame>denisduarte/workplace_extractor
# from workplace_extractor.Nodes.Node import Node
# from workplace_extractor.Nodes.NodeCollection import NodeCollection, PostCollection
# from workplace_extractor.Nodes.Post import Post
# from workplace_extractor.Nodes.Group import Group
| 1.171875 | 1 |
src/debruijnextend/utils.py | Dreycey/DebruijnExtend | 0 | 12793716 | """
This module contains methods/objects that facilitate
basic operations.
"""
# std pkgs
import numpy as np
import random
from typing import Dict, List, Optional, Union
from pathlib import Path
import pickle
# non-std pkgs
import matplotlib.pyplot as plt
def hamming_dist(k1, k2):
val = 0
for ind, char in enumerate(k1):
if char != k2[ind]: val += 1
return val
def clean_input_sequences(input_seq):
"""
This method cleans all input sequences to ensure they will
be compatible with the precomputed hash table.
"""
seq_list = []
for aa in input_seq:
if aa not in ["A", "R", "N", "D", "C", "Q", "E", "G", "H", "I", "L", "K", "M", "F", "P", "S", "T", "W", "Y", "V"]:
print(aa)
if aa == "*":
amino_chosen = "G"
elif aa == "B":
amino_chosen = np.random.choice(["N", "D"], 1, p=[0.5, 0.5])[0]
elif aa == "Z":
amino_chosen = np.random.choice(["Q", "E"], 1, p=[0.5, 0.5])[0]
elif aa == "J":
amino_chosen = np.random.choice(["L", "I"], 1, p=[0.5, 0.5])[0]
elif aa == "X":
amino_chosen = random.choice(["A", "R", "N", "D", "C",
"Q", "E", "G", "H", "I",
"L", "K", "M", "F", "P",
"S", "T", "W", "Y", "V"])[0]
else:
amino_chosen = aa
seq_list.append(amino_chosen)
return ''.join(seq_list) #+ input_seq[kmer_size+1:]
def readFasta(fasta_file_path: Union[str, Path]):
"""
This function reads a fasta file
Parameters
----------
fasta file path: string OR Path
Returns
-------
proteins : array of protein sequence (ordered)
protein_names : array of protein names (ordered)
"""
proteins, protein_names = [], []
with open(fasta_file_path) as fasta_file:
fasta_file_array = fasta_file.readlines()
for line_count, fasta_line in enumerate(fasta_file_array):
if (fasta_line[0] == ">"):
name = fasta_line.strip("\n")
protein_names.append(name)
proteins.append(protein_seq) if line_count > 0 else None
protein_seq = "" # renew sequence everytime fasta name is added.
else:
protein_seq += fasta_line.strip("\n")
proteins.append(protein_seq)
return proteins, protein_names
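# Hedged example added for illustration; the sequences below are made up and
# "example.fasta" is a placeholder name, not a file shipped with this project.
def _demo_basic_operations():
    # hamming_dist counts the positions at which two equal-length strings differ
    assert hamming_dist("ACDE", "ACDE") == 0
    assert hamming_dist("ACDE", "ACDF") == 1
    # clean_input_sequences replaces ambiguous residues ('*', 'B', 'Z', 'J', 'X')
    # with concrete amino acids, so the cleaned string keeps its length
    assert len(clean_input_sequences("AC*BZ")) == 5
    # readFasta("example.fasta") would return ([sequences], [names]) in file order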
def get_kmer_size(hash_table) -> int:
"""
This function extracts the kmer size from
the hash table.
"""
kmer_size = 0
with open(hash_table, "rb") as hash_tb:
hash = pickle.load(hash_tb)
kmer_size = len(list(hash.keys())[0])
return kmer_size
| 3.15625 | 3 |
ESP8266/WIFI/httpcc/WebControl.py | 1021256354/ELE-Clouds | 3 | 12793717 | <gh_stars>1-10
# -*- coding: UTF-8 -*-
u'''
******************************************************************************
* File:      WebControl.py
* Overview:  Control the device from a web page
* Version:   V0.10
* Author:    <NAME>
* Date:      2018-05-08
* History:   Date         Editor   Version  Notes
             2018-05-08   <NAME>   V0.10    File created
******************************************************************************'''
from ESP8266.WIFI.httpcc import netConnect # imported here to access the global variable "wlan" defined in the netConnect module.
import socket
from machine import Pin
# read the web page (HTML) source file
htmls = open("ESP8266/WIFI/httpcc/WebKZ.html")
html= htmls.read() # read the page directly from the file; if the HTML were written inline here it would have to be wrapped in ''' ''' or it could not be displayed
htmls.close()
#Wemos Dpin to GPIO
#D1->GPIO5 ---- red
#D2->GPIO4 ---- green
#D5->GPIO14 ---- blue
ledBlue = Pin(14, Pin.OUT) # blue
ledGrean = Pin(4, Pin.OUT) # green
ledRed = Pin(5, Pin.OUT) # red
# turn on green only
def greanOnly():
ledBlue.off()
ledGrean.on()
ledRed.off()
# all on
def allOn():
ledBlue.on()
ledGrean.on()
ledRed.on()
# turn on red only
def redOnly():
ledBlue.off()
ledGrean.off()
ledRed.on()
# turn on blue only
def blueOnly():
ledBlue.on()
ledGrean.off()
ledRed.off()
# all off
def allOff():
ledBlue.off()
ledGrean.off()
ledRed.off()
port=80
listenSocket=None
ip=netConnect.wlan.ifconfig()[0]
listenSocket = socket.socket() # create a socket instance
listenSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listenSocket.bind((ip,port)) # bind to the IP address and port used for the network connection
listenSocket.listen(5) # start listening
print ('等待TCP连接...')
while True:
print("连接中.....")
conn, addr = listenSocket.accept()
print("已连接 %s" % str(addr))
request = conn.recv(1024)
print("内容: %s" % str(request))
request = str(request)
    CMD_grean = request.find('/?CMD=greenlight') # index of '/?CMD=greenlight' in the request, -1 if absent (same pattern below)
CMD_allon = request.find('/?CMD=allon')
CMD_red = request.find('/?CMD=redlight')
CMD_blue = request.find('/?CMD=bluelight')
CMD_alloff = request.find('/?CMD=alloff')
print("Data: " + str(CMD_grean))
print("Data: " + str(CMD_allon))
print("Data: " + str(CMD_red))
print("Data: " + str(CMD_blue))
print("Data: " + str(CMD_alloff))
    if CMD_grean == 6: # if this command was found in the request (same for the checks below)
print('+grean')
        greanOnly() # call the green-only function (same pattern for the commands below)
if CMD_allon == 6:
print('+allon')
allOn()
if CMD_red == 6:
print('+red')
redOnly()
if CMD_blue == 6:
print('+blue')
blueOnly()
if CMD_alloff == 6:
print('+alloff')
allOff()
    response = html # load the html page definition into the response
    conn.send(response) # send it to the browser, which renders the control interface
conn.close()
| 2.125 | 2 |
taiseia101/dehumidifier.py | slee124565/pytwseia | 3 | 12793718 | <reponame>slee124565/pytwseia
from taiseia101.core import *
logger = logging.getLogger(__name__)
class Dehumidifier(GeneralDevice):
dev_type = {'id': DeviceTypeCode.DEHUMIDIFIER, 'name': 'DEHUMIDIFIER'}
class ServiceCode(BaseObject):
POWER_RW = 0x00
OP_MODE_RW = 0x01
OP_TIMER_RW = 0x02
HUMIDITY_CFG_RW = 0x03
DEHUMIDIFIER_LEVEL_RW = 0x04
DRY_CLOTHE_LEVEL_RW = 0x05
TEMPERATURE_R = 0x06
HUMIDITY_R = 0x07
FAN_DIRECTION_AUTO_RW = 0x08
FAN_DIRECTION_LEVEL_RW = 0x09
WATER_FULL_ALARM_R = 0x0a
FILTER_CLEAN_NOTIFY_RW = 0x0b
MOOD_LED_RW = 0x0c
AIR_CLEAN_MODE_RW = 0x0d
FAN_LEVEL_RW = 0x0e
SIDE_FAN_R = 0x0f
AUDIO_RW = 0x10
DEFROST_DISPLAY_R = 0x11
DISPLAY_ERR_R = 0x12
DEV_MILDEW_RW = 0x13
HUMIDITY_HIGH_NOTIFY_RW = 0x14
HUMIDITY_HIGH_CFG_RW = 0x15
KEYPAD_LOCK_RW = 0x16
REMOTE_CTRL_LOCK_RW = 0x17
SAA_CTRL_AUDIO_RW = 0x18
OP_CURRENT_R = 0x19
OP_VOLTAGE_R = 0x1a
OP_POWER_FACTOR_R = 0x1b
OP_POWER_WATT_RW = 0x1c
TOTAL_WATT_RW = 0x1d
ERR_HISTORY_1_R = 0x1e
ERR_HISTORY_2_R = 0x1f
ERR_HISTORY_3_R = 0x20
ERR_HISTORY_4_R = 0x21
ERR_HISTORY_5_R = 0x22
# ENG_MODE = 0x50
# RESERVED = 0x7f
def __init__(self):
super(Dehumidifier, self).__init__()
self.set_dev_type(DeviceTypeCode.DEHUMIDIFIER)
@classmethod
def module_name(cls):
return __name__
class DevicePowerService(DeviceCommonOnOffService):
"""
status off: 06,04,00,00,00,02
status on: 06,04,00,00,01,03
power on: 06,04,80,00,01,83
power off: 06,04,80,00,00,82
"""
# POWER_RW = 0x00
class DeviceOpModeService(DeviceEnum16Service):
# OP_MODE_RW = 0x01
class ParamCode(BaseObject):
AUTO = 0
CFG_DEHUMIDIFIER = 1
CONTINUE_DEHUMIDIFIER = 2
DRY_CLOTHE = 3
AIR_CLEAN = 4
DEV_MILDEW = 5
FAN_ONLY = 6
SUPER_DRY = 7
class DeviceOpTimerService(DeviceUint8Service):
# OP_TIMER_RW = 0x02
class ParamCode(BaseObject):
HR_1 = 1
HR_2 = 2
HR_4 = 4
HR_6 = 6
HR_8 = 8
HR_10 = 10
HR_12 = 12
@classmethod
def text(cls, code):
return '{} hr'.format(code)
class DeviceHumidityCfgService(DeviceUint8Service):
# HUMIDITY_CFG_RW = 0x03
class ParamCode(BaseObject):
HUMI_40 = 40
HUMI_45 = 45
HUMI_50 = 50
HUMI_55 = 55
HUMI_60 = 60
HUMI_65 = 65
HUMI_70 = 70
HUMI_75 = 75
@classmethod
def text(cls, code):
return '{} %'.format(code)
class DeviceDehumidifierLevelService(DeviceFeatureLevelService):
# DEHUMIDIFIER_LEVEL_RW = 0x04
pass
class DeviceDryClotheLevelService(DeviceFeatureLevelService):
# DRY_CLOTHE_LEVEL_RW = 0x05
pass
class DeviceTemperatureRService(DeviceInt8Service):
# TEMPERATURE_R = 0x06
pass
class DeviceHumidityRService(DeviceUint8Service):
# HUMIDITY_R = 0x07
pass
class DeviceFanDirectionAutoService(DeviceCommonOnOffService):
# FAN_DIRECTION_AUTO_RW = 0x08
pass
class DeviceFanDirectionLevelService(DeviceFeatureLevelService):
# FAN_DIRECTION_LEVEL_RW = 0x09
pass
class DeviceWaterFullAlarmRService(DeviceEnum16Service):
# WATER_FULL_ALARM_R = 0x0a
class ParamCode(BaseObject):
NORMAL = 0x00
FULL = 0x01
pass
class DeviceFilterCleanNotifyService(DeviceEnum16Service):
# FILTER_CLEAN_NOTIFY_RW = 0x0b
class ParamCode(BaseObject):
NORMAL = 0x00
CLEAN_NEED = 0x01
pass
class DeviceMoodLedService(DeviceCommonOnOffService):
# MOOD_LED_RW = 0x0c
pass
class DeviceAirCleanModeService(DeviceFeatureLevelService):
# AIR_CLEAN_MODE_RW = 0x0d
pass
class DeviceFanLevelService(DeviceFeatureLevelService):
pass # FAN_LEVEL_RW = 0x0e
class DeviceSideFanRService(DeviceEnum16Service):
# SIDE_FAN_R = 0x0f
class ParamCode(BaseObject):
NORMAL = 0x00
SIDE = 0x01
pass
class DeviceAudioService(DeviceEnum16Service):
# AUDIO_RW = 0x10
class ParamCode(BaseObject):
QUIET = 0x00
BUTTON = 0x01
WATER_FULL_BUTTON = 0x02
pass
class DeviceDefrostDisplayRService(DeviceEnum16Service):
# DEFROST_DISPLAY_R = 0x11
class ParamCode(BaseObject):
NORMAL = 0x00
DEFROST = 0x01
pass
class DeviceDisplayErrRService(DeviceEnum16BitService):
# DISPLAY_ERR_R = 0x12
pass
class DeviceDevMildewService(DeviceCommonOnOffService):
# DEV_MILDEW_RW = 0x13
pass
class DeviceHumidityHighNotifyService(DeviceCommonOnOffService):
# HUMIDITY_HIGH_NOTIFY_RW = 0x14
pass
class DeviceHumidityHighCfgService(DeviceUint8Service):
# HUMIDITY_HIGH_CFG_RW = 0x15
pass
class DeviceKeypadLockService(DeviceCommonOnOffService):
# KEYPAD_LOCK_RW = 0x16
pass
class DeviceRemoteCtrlLockService(DeviceEnum16BitService):
# REMOTE_CTRL_LOCK_RW = 0x17
pass
class DeviceSaaCtrlAudioService(DeviceCommonOnOffService):
# SAA_CTRL_AUDIO_RW = 0x18
pass
class DeviceOpCurrentRService(DeviceUint16Service):
# OP_CURRENT_R = 0x19
pass
class DeviceOpVoltageRService(DeviceUint16Service):
# OP_VOLTAGE_R = 0x1a
pass
class DeviceOpPowerFactorRService(DeviceUint16Service):
# OP_POWER_FACTOR_R = 0x1b
pass
class DeviceOpPowerWattService(DeviceUint16Service):
# OP_POWER_WATT_RW = 0x1c
pass
class DeviceTotalWattService(DeviceUint16Service):
# TOTAL_WATT_RW = 0x1d
pass
class DeviceErrHistory1RService(DeviceEnum16BitService):
# ERR_HISTORY_1_R = 0x1e
pass
class DeviceErrHistory2RService(DeviceEnum16BitService):
# ERR_HISTORY_2_R = 0x1f
pass
class DeviceErrHistory3RService(DeviceEnum16BitService):
# ERR_HISTORY_3_R = 0x20
pass
class DeviceErrHistory4RService(DeviceEnum16BitService):
# ERR_HISTORY_4_R = 0x21
pass
class DeviceErrHistory5RService(DeviceEnum16BitService):
# ERR_HISTORY_5_R = 0x22
pass
if __name__ == '__main__':
FORMAT = '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
dev_cls = Dehumidifier
# dev_cls.print_service_cls_template()
dev_obj = dev_cls()
logger.info('{}'.format(dev_obj.device_detail(cmd_check_serv=False)))
| 2.09375 | 2 |
tcp_connectors/gmqtt_connector.py | evocount/connectors | 2 | 12793719 | import asyncio
import logging
import uuid
import ssl
from gmqtt import Client as MQTTClient
from .exceptions import ConnectionFailed, DestinationNotAvailable
from .base import BaseConnector
logger = logging.getLogger(__name__)
class GMQTTConnector(BaseConnector):
"""GMQTTConnector uses gmqtt library for connectors
running over MQTT.
"""
def __init__(self, host, port, subscribe_topic, publish_topic, **kwargs):
self.host = host
self.port = port
# topics
self.subscribe_topic = subscribe_topic
self.publish_topic = publish_topic
# connection
self.connection_id = uuid.uuid4().hex[:8]
self.is_connected = False
self.client = MQTTClient(self.connection_id)
# callbacks
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.on_disconnect = self.on_disconnect
self.client.on_subscribe = self.on_subscribe
self.STOP = asyncio.Event()
# options
self.ack_topic = kwargs.get('ack_topic')
self.enable_ssl = kwargs.get('enable_ssl', False)
self.enable_auth = kwargs.get('enable_auth', False)
self.username = kwargs.get('username')
self.password = kwargs.get('password')
self.client_cert = kwargs.get('client_cert')
self.client_key = kwargs.get('client_key')
self.qos = kwargs.get('qos', 2)
def get_connection_details(self):
"""get_connection_details returns the details
about the current MQTT connection.
"""
return dict(
connection_id=self.connection_id,
host=self.host,
port=self.port,
is_connected=self.is_connected,
subscribe_topic=self.subscribe_topic,
publish_topic=self.publish_topic
)
def on_connect(self, *args):
"""on_connect is a callback that gets exectued after the
connection is made.
Arguments:
client {MQTTClient} -- gmqtt.MQTTClient
flags {int} -- connection flags
rc {int} -- connection result code
properties {dict} -- config of the current connection
"""
logger.info("Connected with result code %s", str(args[2]))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
# client.subscribe("$SYS/#", qos=0)
if isinstance(self.subscribe_topic, str):
self.client.subscribe(self.subscribe_topic, qos=self.qos)
elif isinstance(self.subscribe_topic, list):
for topic in self.subscribe_topic:
self.client.subscribe(topic, qos=self.qos)
else:
logger.warning('subscribe_topic is either None or an unknown data type.'
' Currently subscribed to 0 topics.')
async def on_message(self, *args):
"""on_message callback gets executed when the connection receives
a message.
Arguments:
client {MQTTClient} -- gmqtt.MQTTClient
topic {string} -- topic from which message was received
payload {bytes} -- actual message bytes received
qos {string} -- message QOS level (0,1,2)
properties {dict} -- message properties
"""
logger.info("%s %s", args[1], str(args[2]))
return 0
@staticmethod
def on_disconnect(*args):
"""on_disconnect is a callback that gets executed
after a disconnection occurs"""
logger.info('Disconnected')
@staticmethod
def on_subscribe(*args):
"""on_subscribe is a callback that gets executed
        after a subscription is successful"""
logger.info('Subscribed')
def ask_exit(self):
"""sets the STOP variable so that a signal gets sent
to disconnect the client
"""
self.STOP.set()
async def start(self):
"""starts initiates the connnection with the broker
Raises:
DestinationNotAvailable: If broker is not available
ConnectionFailed: If connection failed due to any other reason
"""
try:
conn_kwargs = dict(host=self.host, port=self.port)
if self.enable_auth:
self.client.set_auth_credentials(self.username, self.password)
if self.enable_ssl:
assert self.client_cert and self.client_key, \
"Cannot enable ssl without specifying client_cert and client_key"
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
ssl_context.load_cert_chain(self.client_cert,
keyfile=self.client_key)
conn_kwargs.update(dict(ssl=ssl_context))
await self.client.connect(**conn_kwargs)
self.is_connected = True
except ConnectionRefusedError as e:
# raising from None suppresses the exception chain
raise DestinationNotAvailable(
f'Connection Failed: Error connecting to'
f' {self.host}:{self.port} - {e}'
) from None
except Exception as e:
raise ConnectionFailed(e)
async def publish(self, *args, **kwargs):
"""publishes the message to the topic using client.publish"""
self.client.publish(*args, **kwargs)
async def stop(self):
"""force stop the connection with the MQTT broker."""
await self.client.disconnect()
self.is_connected = False
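# Hedged usage sketch (added; the broker address and topics below are
# placeholder assumptions, not values taken from this project):
#
#     connector = GMQTTConnector("broker.example.com", 1883,
#                                subscribe_topic="devices/+/events",
#                                publish_topic="devices/commands")
#     await connector.start()      # connect, authenticate and subscribe
#     await connector.publish(connector.publish_topic, "ping", qos=1)
#     await connector.stop()       # disconnect cleanly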
| 2.546875 | 3 |
server/tests/steps/__init__.py | JeremyJacquemont/weaverbird | 54 | 12793720 | <reponame>JeremyJacquemont/weaverbird
"""
This module contains unit tests for every supported pipeline step
"""
| 0.71875 | 1 |
docs/tutorials/scripts/pure_scattering_plot.py | christopherlovell/hyperion | 37 | 12793721 | import matplotlib.pyplot as plt
from hyperion.model import ModelOutput
from hyperion.util.constants import pc
mo = ModelOutput('pure_scattering.rtout')
image_fnu = mo.get_image(inclination=0, units='MJy/sr', distance=300. * pc)
image_pol = mo.get_image(inclination=0, stokes='linpol')
fig = plt.figure(figsize=(8, 8))
# Make total intensity sub-plot
ax = fig.add_axes([0.1, 0.3, 0.4, 0.4])
ax.imshow(image_fnu.val[:, :, 0], extent=[-13, 13, -13, 13],
interpolation='none', cmap=plt.cm.gist_heat,
origin='lower', vmin=0., vmax=4e9)
ax.set_xlim(-13., 13.)
ax.set_ylim(-13., 13.)
ax.set_xlabel("x (solar radii)")
ax.set_ylabel("y (solar radii)")
ax.set_title("Surface brightness")
# Make linear polarization sub-plot
ax = fig.add_axes([0.51, 0.3, 0.4, 0.4])
im = ax.imshow(image_pol.val[:, :, 0] * 100., extent=[-13, 13, -13, 13],
interpolation='none', cmap=plt.cm.gist_heat,
origin='lower', vmin=0., vmax=100.)
ax.set_xlim(-13., 13.)
ax.set_ylim(-13., 13.)
ax.set_xlabel("x (solar radii)")
ax.set_title("Linear Polarization")
ax.set_yticklabels('')
axcb = fig.add_axes([0.92, 0.3, 0.02, 0.4])
cb=plt.colorbar(im, cax=axcb)
cb.set_label('%')
fig.savefig('pure_scattering_inner_disk.png', bbox_inches='tight')
| 2.3125 | 2 |
hug_peewee/__init__.py | timothycrosley/hug_peewee | 7 | 12793722 | """Defines how the hug_peewee package exposes modules as well as its version number"""
from hug_peewee._version import current
from hug_peewee.connection import ENGINES
from hug_peewee import connection
__version__ = current
| 1.539063 | 2 |
aoc2021/day05-hydrotermal_venture/solution2021d05.py | carloscabello/AoC | 0 | 12793723 | """Day 5: Hydrothermal Venture"""
import sys, os, inspect
# necessary to import the aoc2021/utils.py module
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import utils
exampledata = ['0,9 -> 5,9', '8,0 -> 0,8', '9,4 -> 3,4', '2,2 -> 2,1', '7,0 -> 7,4', '6,4 -> 2,0', '0,9 -> 2,9', '3,4 -> 1,4', '0,0 -> 8,8', '5,5 -> 8,2']
def input_parser(inputdata):
"""Given the input of the puzzle represent it with a list of tuples
Parameters
----------
inputdata : list
A list of strings, each being a line of the raw input file.
Returns
----------
    res : list
        A list of point pairs ((x1, y1), (x2, y2)), one per input line.
max_x : int
Max x coordinate value from all points
max_y : int
Max y coordinate value from all points
"""
res = []
max_x = 0
max_y = 0
for line in inputdata:
pointpair = ()
for strpoint in line.split('->'):
strpoint = strpoint.strip()
point = ()
for indexcoord, strcoord in enumerate(strpoint.split(',')):
valuecoord = int(strcoord)
point += (valuecoord,)
if(indexcoord==0 and max_x<valuecoord):
max_x = valuecoord
elif(0<indexcoord and max_y<valuecoord):
max_y = valuecoord
pointpair += (point,)
res.append(pointpair)
# return a list of points-pair (x1,y1) and (x2,y2)
# each point is a pair x,y coordinates
return res, max_x, max_y
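# Added illustration: a line such as '0,9 -> 5,9' becomes the pair ((0, 9), (5, 9));
# max_x and max_y track the largest x and y coordinates seen across all points.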
def closedrange(start, stop):
"Return all values in the interval [start,stop] no matter which one is greater"
step = 1 if (start<=stop) else -1
return range(start, stop + step, step)
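# Added note: closedrange is direction-agnostic, e.g.
# list(closedrange(0, 3)) == [0, 1, 2, 3] and list(closedrange(3, 0)) == [3, 2, 1, 0].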
def vent_mapper(inputdata, include_diagonal_lines=False):
"""Given the already parsed input data from puzzle aoc2021day05 return the final solution
Parameters
----------
inputdata : list
A list of tuples, each tuple representing a pair of points. Each point itself is a tuple (int,int).
include_diagonal_lines : bool (Default=False)
If points describe a diagonal line, include them in the mapping.
The default behavior is to only include vertical o diagonal lines
"""
ventpointpairs, max_x, max_y = input_parser(inputdata)
ventmap = [[0]*(max_x+1) for i in range(max_y+1)] # to index the ventmap: ventmap[y][x]
for ventsegment in ventpointpairs:
x1,y1 = ventsegment[0]
x2,y2 = ventsegment[1]
# only horizontal and vertical lines
if(x1 == x2):
for y in closedrange(y1, y2):
ventmap[y][x1] += 1
elif(y1 == y2):
for x in closedrange(x1, x2):
ventmap[y1][x] += 1
# diagonal line at exactly 45 degrees
elif (include_diagonal_lines):
for x,y in closedrange_diag( (x1,y1), (x2,y2) ):
ventmap[y][x] += 1
return vent_counter(ventmap,2)
def closedrange_diag(start, stop):
"Return all points (x,y) from a 45º diagonal line from (x1,y1) to (x2,y2)"
x1, y1 = start
x2, y2 = stop
return zip(closedrange(x1, x2), closedrange(y1, y2))
def vent_counter(ventmap, overlap):
res = 0
for ventrow in ventmap:
for ventelem in ventrow:
if (overlap <= ventelem):
res +=1
return res
def main():
inputdata = []
# run script with arguments: load the input file
if(2 <= len(sys.argv)):
inputdata = utils.loadinput(sys.argv[1])
# run script with no arguments: load example data
else:
inputdata = exampledata
print(f"Puzzle input (example)")
print(f"{exampledata}\n")
print(f"Answer (part 1): {vent_mapper(inputdata)}\n") # Correct example answer: 5
print(f"Answer (part 2): {vent_mapper(inputdata, True)}") # Correct example answer: 12
pass
if __name__ == "__main__":
main() | 3.234375 | 3 |
enabling_addons/enabling_addons_example_2.py | CGArtPython/bpy_building_blocks_examples | 0 | 12793724 | <gh_stars>0
import bpy
import addon_utils
def enable_addon(addon_module_name):
loaded_default, loaded_state = addon_utils.check(addon_module_name)
if not loaded_state:
addon_utils.enable(addon_module_name)
##########################################################
# _____ _ _ _ _____
# | ___| | | _| || |_ / __ \
# | |____ ____ _ _ __ ___ _ __ | | ___ |_ __ _| `' / /'
# | __\ \/ / _` | '_ ` _ \| '_ \| |/ _ \ _| || |_ / /
# | |___> < (_| | | | | | | |_) | | __/ |_ __ _| ./ /___
# \____/_/\_\__,_|_| |_| |_| .__/|_|\___| |_||_| \_____/
# | |
# |_|
##########################################################
enable_addon(addon_module_name="add_mesh_extra_objects")
for i in range(10):
bpy.ops.mesh.primitive_solid_add(source='12', size=i*0.1)
bpy.ops.object.modifier_add(type='WIREFRAME')
| 1.953125 | 2 |
src/15/15953.py | youngdaLee/Baekjoon | 11 | 12793725 | """
15953. Prize Hunter (상금 헌터)
Author: xCrypt0r
Language: Python 3
Memory used: 29,380 KB
Running time: 160 ms
Solved on: September 25, 2020
"""
def main():
for _ in range(int(input())):
a, b = map(int, input().split())
money = 0
if a == 1: money += 5000000
elif 2 <= a <= 3: money += 3000000
elif 4 <= a <= 6: money += 2000000
elif 7 <= a <= 10: money += 500000
elif 11 <= a <= 15: money += 300000
elif 16 <= a <= 21: money += 100000
if b == 1: money += 5120000
elif 2 <= b <= 3: money += 2560000
elif 4 <= b <= 7: money += 1280000
elif 8 <= b <= 15: money += 640000
elif 16 <= b <= 31: money += 320000
print(money)
if __name__ == '__main__':
main() | 3.609375 | 4 |
srcs/sg/objects/abstract_object.py | pomponchik/computor_v2 | 0 | 12793726 | <reponame>pomponchik/computor_v2
from srcs.errors import RuntimeASTError
class AbstractObject:
def __init__(self):
raise NotImplementedError
@classmethod
def create_from_node(cls, node):
raise NotImplementedError('operation not defined')
def representation(self, context):
raise NotImplementedError('operation not defined')
def type_representation(self):
raise NotImplementedError('operation not defined')
def one_string_representation(self, context):
return self.representation(context)
def operation(self, other, operation, operation_node):
if operation in ('*', '+', '**'):
try:
return self.real_operation(other, operation, operation_node)
except RuntimeASTError as e:
if not self.type_mark == 'm':
return other.real_operation(self, operation, operation_node)
raise e
else:
return self.real_operation(other, operation, operation_node)
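    # Added note: for '*', '+' and '**' the operands are retried in swapped
    # order when the left-hand object cannot handle the pair; if the left-hand
    # object is a matrix (type_mark 'm'), the original error is re-raised instead.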
def real_operation(self, other, operation, operation_node):
raise RuntimeASTError(f'the "{operation}" operation between {self.type_representation()} and {other.type_representation()} is not defined', operation_node)
| 2.5 | 2 |
utils/cal_normalize.py | qaz670756/LSNet | 3 | 12793727 | import numpy as np
import cv2
import os
from glob import glob
from tqdm import tqdm
img_h, img_w = 256, 256
means, stdevs = [], []
img_list = []
TRAIN_DATASET_PATH = 'data/Real/subset/train/B'
image_fns = glob(os.path.join(TRAIN_DATASET_PATH, '*.*'))
for single_img_path in tqdm(image_fns):
img = cv2.imread(single_img_path)
img = cv2.resize(img, (img_w, img_h))
img = img[:, :, :, np.newaxis]
img_list.append(img)
imgs = np.concatenate(img_list, axis=3)
imgs = imgs.astype(np.float32) / 255.
for i in range(3):
    pixels = imgs[:, :, i, :].ravel()  # flatten the channel into a 1-D array
means.append(np.mean(pixels))
stdevs.append(np.std(pixels))
# BGR --> RGB: images read with OpenCV need this conversion, images read with PIL do not
means.reverse()
stdevs.reverse()
print("normMean = {}".format(means))
print("normStd = {}".format(stdevs))
# normMean = [0.35389897, 0.39104056, 0.34307468]
# normStd = [0.2158508, 0.23398565, 0.20874721]
# normMean1 = [0.47324282, 0.498616, 0.46873462]
# normStd1 = [0.2431127, 0.2601882, 0.25678185]
# [0.413570895, 0.44482827999999996, 0.40590465]
# [0.22948174999999998, 0.24708692499999999, 0.23276452999999997] | 2.46875 | 2 |
iniciante/python/2493-jogo-do-operador.py | tfn10/beecrowd | 0 | 12793728 | <gh_stars>0
def jogo_do_operador():
while True:
expressoes = list()
resposta_dos_jogadores = list()
jogadores_que_erraram = list()
try:
quantidade_de_expressoes = int(input())
for expre in range(quantidade_de_expressoes):
expressoes.append(list(map(str, input().split())))
for resposta in range(quantidade_de_expressoes):
resposta_dos_jogadores.append(list(map(str, input().split())))
for i in resposta_dos_jogadores:
nome_do_jogador = i[0]
indice = int(i[1]) - 1
expressao = i[2]
operador_x = int(expressoes[indice][0])
operador_y = int(expressoes[indice][1].replace('=', ' ').split()[0])
operador_z = int(expressoes[indice][1].replace('=', ' ').split()[1])
if expressao == '+':
if operador_x + operador_y != operador_z:
jogadores_que_erraram.append(nome_do_jogador)
elif expressao == '-':
if operador_x - operador_y != operador_z:
jogadores_que_erraram.append(nome_do_jogador)
elif expressao == '*':
if operador_x * operador_y != operador_z:
jogadores_que_erraram.append(nome_do_jogador)
jogadores_que_erraram.sort()
if jogadores_que_erraram:
if len(jogadores_que_erraram) != quantidade_de_expressoes:
for jogador in jogadores_que_erraram:
if jogador == jogadores_que_erraram[len(jogadores_que_erraram)-1]:
print(jogador)
else:
print(jogador, end=' ')
else:
print('None Shall Pass!')
else:
print('You Shall All Pass!')
except EOFError:
break
except ValueError:
break
jogo_do_operador()
| 3.578125 | 4 |
a06_Seq2seqWithAttention/a1_seq2seq.py | sunshinenum/text_classification | 7,723 | 12793729 | # -*- coding: utf-8 -*-
import tensorflow as tf
# [Used at test/inference time] Returns a function that takes the previous output, picks the argmax index, and looks up that token's embedding.
def extract_argmax_and_embed(embedding, output_projection=None):
"""
Get a loop_function that extracts the previous symbol and embeds it. Used by decoder.
:param embedding: embedding tensor for symbol
:param output_projection: None or a pair (W, B). If provided, each fed previous output will
first be multiplied by W and added B.
:return: A loop function
"""
def loop_function(prev, _):
if output_projection is not None:
prev = tf.matmul(prev, output_projection[0]) + output_projection[1]
        prev_symbol = tf.argmax(prev, 1) # get the predicted token index
        emb_prev = tf.gather(embedding, prev_symbol) # look up the embedding for that index
return emb_prev
return loop_function
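# A minimal usage sketch (hypothetical tensors, shown only to illustrate how the
# returned loop function is wired into the decoder defined below at inference time):
#
#   loop_fn = extract_argmax_and_embed(embedding, output_projection=None)
#   outputs, state = rnn_decoder_with_attention(decoder_inputs, initial_state,
#                                               cell, loop_fn, attention_states)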
# RNN decoder part.
# During training, use the ground-truth decoder inputs; at test time, feed the output at time t back in as the input at time t+1.
def rnn_decoder_with_attention(decoder_inputs, initial_state, cell, loop_function,attention_states,scope=None):#3D Tensor [batch_size x attn_length x attn_size]
"""RNN decoder for the sequence-to-sequence model.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].it is decoder input.
initial_state: 2D Tensor with shape [batch_size x cell.state_size].it is the encoded vector of input sentences, which represent 'thought vector'
cell: core_rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].it is represent input X.
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing generated outputs.
state: The state of each cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
(Note that in some cases, like basic RNN cell or GRU cell, outputs and
states can be the same. They are different for LSTM cells though.)
"""
with tf.variable_scope(scope or "rnn_decoder"):
print("rnn_decoder_with_attention started...")
state = initial_state #[batch_size x cell.state_size].
_, hidden_size = state.get_shape().as_list() #200
attention_states_original=attention_states
batch_size,sequence_length,_=attention_states.get_shape().as_list()
outputs = []
prev = None
#################################################
        for i, inp in enumerate(decoder_inputs):# loop over the decoder inputs, e.g. sentence_length tensors of [batch_size x input_size]
            # during training, use the ground-truth input; at test time, feed the output at time t back in as the input at time t+1
            if loop_function is not None and prev is not None:# at test time: if loop_function exists and the previous output is available, use the previous output as the RNN input
with tf.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
if i > 0:
tf.get_variable_scope().reuse_variables()
##ATTENTION#################################################################################################################################################
# 1.get logits of attention for each encoder input. attention_states:[batch_size x attn_length x attn_size]; query=state:[batch_size x cell.state_size]
query=state
W_a = tf.get_variable("W_a", shape=[hidden_size, hidden_size],initializer=tf.random_normal_initializer(stddev=0.1))
query=tf.matmul(query, W_a) #[batch_size,hidden_size]
query=tf.expand_dims(query,axis=1) #[batch_size, 1, hidden_size]
U_a = tf.get_variable("U_a", shape=[hidden_size, hidden_size],initializer=tf.random_normal_initializer(stddev=0.1))
U_aa = tf.get_variable("U_aa", shape=[ hidden_size])
attention_states=tf.reshape(attention_states,shape=(-1,hidden_size)) #[batch_size*sentence_length,hidden_size]
attention_states=tf.matmul(attention_states, U_a) #[batch_size*sentence_length,hidden_size]
#print("batch_size",batch_size," ;sequence_length:",sequence_length," ;hidden_size:",hidden_size) #print("attention_states:", attention_states) #(?, 200)
attention_states=tf.reshape(attention_states,shape=(-1,sequence_length,hidden_size)) # TODO [batch_size,sentence_length,hidden_size]
#query_expanded: [batch_size,1, hidden_size]
#attention_states_reshaped: [batch_size,sentence_length,hidden_size]
attention_logits=tf.nn.tanh(query+attention_states+U_aa) #[batch_size,sentence_length,hidden_size]. additive style
# 2.get possibility of attention
attention_logits=tf.reshape(attention_logits,shape=(-1,hidden_size)) #batch_size*sequence_length [batch_size*sentence_length,hidden_size]
V_a = tf.get_variable("V_a", shape=[hidden_size,1],initializer=tf.random_normal_initializer(stddev=0.1)) #[hidden_size,1]
attention_logits=tf.matmul(attention_logits,V_a) #最终需要的是[batch_size*sentence_length,1]<-----[batch_size*sentence_length,hidden_size],[hidden_size,1]
attention_logits=tf.reshape(attention_logits,shape=(-1,sequence_length)) #attention_logits:[batch_size,sequence_length]
##########################################################################################################################################################
#attention_logits=tf.reduce_sum(attention_logits,2) #[batch_size x attn_length]
attention_logits_max=tf.reduce_max(attention_logits,axis=1,keep_dims=True) #[batch_size x 1]
# possibility distribution for each encoder input.it means how much attention or focus for each encoder input
p_attention=tf.nn.softmax(attention_logits-attention_logits_max)#[batch_size x attn_length]
# 3.get weighted sum of hidden state for each encoder input as attention state
p_attention=tf.expand_dims(p_attention,axis=2) #[batch_size x attn_length x 1]
# attention_states:[batch_size x attn_length x attn_size]; p_attention:[batch_size x attn_length];
attention_final=tf.multiply(attention_states_original,p_attention) #[batch_size x attn_length x attn_size]
context_vector=tf.reduce_sum(attention_final,axis=1) #[batch_size x attn_size]
############################################################################################################################################################
#inp:[batch_size x input_size].it is decoder input; attention_final:[batch_size x attn_size]
            output, state = cell(inp, state,context_vector) #attention_final TODO: advance the RNN by one step with the attention context
            outputs.append(output) # append the output to the results list
if loop_function is not None:
prev = output
print("rnn_decoder_with_attention ended...")
return outputs, state | 3.21875 | 3 |
kitsune/users/migrations/0025_auto_20200926_0638.py | The-smooth-operator/kitsune | 929 | 12793730 | # Generated by Django 2.2.14 on 2020-09-26 06:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0024_auto_20200914_0433'),
]
operations = [
migrations.AlterModelOptions(
name='profile',
options={'permissions': (('view_karma_points', 'Can view karma points'), ('deactivate_users', 'Can deactivate users'))},
),
]
| 1.375 | 1 |
mysite/services/plot_bound_service.py | mscibxablach/PhotoApp2 | 0 | 12793731 | <gh_stars>0
from mysite.plot_bound_detector.PlotBoundCalculator import PlotBoundCalculator
class PlotBoundService:
def __init__(self, plot_bound_detector):
self.plot_bound_detector = plot_bound_detector
def get_plot_bound_ratio(self, image):
top_chunks, bottom_chunks = self.plot_bound_detector.get_plot_bound(image)
top, bottom, ratio = PlotBoundCalculator.calculate_distance_ratio(top_chunks, bottom_chunks)
return top, bottom, ratio
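# A minimal usage sketch (hypothetical detector object and image, for illustration only):
#
#   service = PlotBoundService(plot_bound_detector=my_detector)
#   top, bottom, ratio = service.get_plot_bound_ratio(image)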
| 2.515625 | 3 |
main.py | csepy/csepy | 1 | 12793732 | <filename>main.py<gh_stars>1-10
#!/usr/bin/python3
import sys
from csepy.csepy import Start
if len(sys.argv) > 1:
Start(sysargs=sys.argv)
elif __name__ == '__main__':
Start()
| 1.875 | 2 |
handlers/other/clan.py | roomdie/KingsEmpiresBot | 0 | 12793733 | <gh_stars>0
import typing
import keyboards
import states
import re
import random
from loader import dp
from aiogram import types
from aiogram.dispatcher import filters
from aiogram import exceptions
from aiogram.dispatcher import FSMContext
from utils.misc.read_file import read_txt_file
from utils.db_api import tables, db_api
from utils.classes import kb_constructor, timer
from sqlalchemy import desc, and_
from utils.classes.regexps import ClanRegexp
@dp.message_handler(state="*", commands="clan")
async def clan_command_handler(message: types.Message, state: FSMContext):
user_id = message.from_user.id
session = db_api.CreateSession()
clan_member: tables.ClanMember = session.db.query(
tables.ClanMember).filter_by(user_id=user_id).join(tables.Clan).first()
buildings: tables.Buildings = session.db.query(
tables.Buildings).filter_by(user_id=user_id).first()
if buildings.clan_building_lvl == 0:
sticker = read_txt_file("sticker/sad_knight")
await message.answer_sticker(sticker=sticker)
msg_text = read_txt_file("text/clan/destroyed_clan")
await message.answer(
text=msg_text,
)
session.close()
return
if clan_member is None:
msg_text = read_txt_file("text/clan/without_clan")
clan_msg = await message.answer(
text=msg_text,
reply_markup=keyboards.clan.kb_none_clan
)
await state.update_data({
"user_id": user_id,
"clan_msg": clan_msg
})
session.close()
return
clan_members: typing.List[tables.ClanMember] = session.db.query(
tables.ClanMember).filter_by(clan_id=clan_member.clan_id).all()
clan_creator: tables.User = session.db.query(
tables.User).filter_by(user_id=clan_member.clan.creator).first()
keyboard = kb_constructor.StandardKeyboard(user_id=user_id).create_clan_keyboard()
msg_text = read_txt_file("text/clan/in_clan")
clan_msg = await message.answer(
text=msg_text.format(
clan_member.clan.emoji,
clan_member.clan.name,
clan_member.clan.description[:21],
clan_member.clan.rating,
len(clan_members),
clan_creator.first_name,
clan_member.rank),
reply_markup=keyboard
)
await state.update_data({
"user_id": user_id,
"clan_msg": clan_msg
})
session.close()
@dp.callback_query_handler(regexp=ClanRegexp.back)
async def clan_back_handler(callback: types.CallbackQuery, state: FSMContext):
data = await state.get_data()
user_id = callback.from_user.id
clan_msg: types.Message = data.get("clan_msg")
if data.get("user_id") != user_id:
msg_text = read_txt_file("text/hints/foreign_button")
return await callback.answer(msg_text)
await clan_msg.edit_text(
text=clan_msg.html_text,
reply_markup=clan_msg.reply_markup,
)
await callback.answer()
@dp.callback_query_handler(regexp=ClanRegexp.without_clan)
async def none_clan_handler(callback: types.CallbackQuery, state: FSMContext):
data = await state.get_data()
user_id = callback.from_user.id
clan_msg: types.Message = data.get("clan_msg")
if data.get("user_id") != user_id:
msg_text = read_txt_file("text/hints/foreign_button")
return await callback.answer(msg_text)
session = db_api.CreateSession()
townhall: tables.TownHall = session.db.query(
tables.TownHall).filter_by(user_id=user_id).first()
if callback.data == "create_clan":
if townhall.money >= 5000:
townhall.money -= 5000
else:
await callback.answer(
"Для создания клана не хватает {} 💰".format(
5000 - townhall.money)
)
session.close()
return
clan_msg = await clan_msg.edit_text(
text="Придумайте <b>название</b> для своего\n"
"клана. (макс. 16 символов)\n"
"<i>Ответив на это сообщение.</i>"
)
await state.update_data({
"message_id": clan_msg.message_id
})
await states.Clan.set_name.set()
elif callback.data == "clans_rating":
clan_table: typing.List[tables.Clan] = session.db.query(
tables.Clan).order_by(desc(tables.Clan.rating)).all()
text = ""
clan_num = 1
for clan in clan_table[:10]:
text += "{}. <b>{}</b> [ <code>{}</code> ⭐ ]\n".format(clan_num, clan.name, clan.rating)
clan_num += 1
msg_text = read_txt_file("text/clan/rating")
await clan_msg.edit_text(
text=msg_text.format(text),
reply_markup=keyboards.clan.kb_back
)
elif callback.data == "clan_invitation":
keyboard = kb_constructor.PaginationKeyboard(
user_id=user_id).create_invitation_keyboard()
msg_text = read_txt_file("text/clan/clan_invitations")
clan_invitation_msg = await clan_msg.edit_text(
text=msg_text,
reply_markup=keyboard
)
await state.update_data({
"clan_invitation_msg": clan_invitation_msg
})
await callback.answer()
session.close()
@dp.callback_query_handler(regexp=ClanRegexp.menu)
async def clan_menu_handler(callback: types.CallbackQuery, state: FSMContext):
data = await state.get_data()
user_id = callback.from_user.id
clan_msg: types.Message = data.get("clan_msg")
if data.get("user_id") != user_id:
msg_text = read_txt_file("text/hints/foreign_button")
return await callback.answer(msg_text)
session = db_api.CreateSession()
clan_member: tables.ClanMember = session.db.query(
tables.ClanMember).filter_by(user_id=user_id).join(tables.Clan).first()
clan_members: typing.List[tables.ClanMember] = session.db.query(
tables.ClanMember).filter_by(clan_id=clan_member.clan_id).all()
clans_search: typing.List[tables.Clan] = session.db.query(
tables.Clan).filter(and_(
tables.Clan.state == "search", tables.Clan.clan_id != clan_member.clan_id)).all()
if callback.data == "clan_war":
if clan_member.clan.state == "search":
if clans_search:
clan_1: tables.Clan = clan_member.clan
clan_2: tables.Clan = random.choice(clans_search)
clan_1.state = "contest"
clan_2.state = "contest"
new_contest = tables.Contest(
clan_id_1=clan_1.clan_id,
clan_id_2=clan_2.clan_id,
recent_log=["Война началась."],
log=["Война началась."],
state_timer=None,
territory_names=["Russia", "Germany"],
territory_owners=[None, None],
territory_units=[0, 0],
territory_captures=[None, None],
clans_rating=[0, 0],
colors=["blue", "red"]
)
session.db.add(new_contest)
msg_text = read_txt_file("text/clan/contest")
await clan_msg.edit_text(
text=msg_text.format(clan_member.clan.contest_count),
reply_markup=keyboards.clan.kb_back
)
session.close()
return
msg_text = read_txt_file("text/clan/search_contest")
await clan_msg.edit_text(
text=msg_text.format(clan_member.clan.contest_count),
reply_markup=keyboards.clan.kb_cancel_contest
)
elif clan_member.clan.state == "contest":
msg_text = read_txt_file("text/clan/contest")
await clan_msg.edit_text(
text=msg_text.format(clan_member.clan.contest_count),
reply_markup=keyboards.clan.kb_back
)
elif clan_member.clan.state == "ending":
msg_text = read_txt_file("text/clan/ending_contest")
await clan_msg.edit_text(
text=msg_text.format(clan_member.clan.contest_count),
reply_markup=keyboards.clan.kb_back
)
else:
msg_text = read_txt_file("text/clan/none_contest")
await clan_msg.edit_text(
text=msg_text.format(clan_member.clan.contest_count),
reply_markup=keyboards.clan.kb_search_contest
)
elif callback.data == "clan_members":
keyboard = kb_constructor.PaginationKeyboard(
user_id=user_id).create_members_keyboard()
msg_text = read_txt_file("text/clan/members")
clan_members_msg = await clan_msg.edit_text(
text=msg_text.format(
clan_member.clan.name,
clan_member.clan.description[:21],
len(clan_members),
),
reply_markup=keyboard
)
await state.update_data({
"clan_members_msg": clan_members_msg
})
elif callback.data == "clan_settings":
await callback.answer("В разработке...")
await callback.answer()
session.close()
@dp.callback_query_handler(regexp=ClanRegexp.invitation_page)
async def clan_invitation_pages_handler(callback: types.CallbackQuery, state: FSMContext):
data = await state.get_data()
user_id = callback.from_user.id
clan_msg: types.Message = data.get("clan_msg")
if data.get("user_id") != user_id:
msg_text = read_txt_file("text/hints/foreign_button")
return await callback.answer(msg_text)
page_move = re.findall(r"invitation_page_(\d+)", callback.data)[0]
page = int(page_move)
keyboard = kb_constructor.PaginationKeyboard(
user_id=user_id).create_invitation_keyboard(page)
try:
msg_text = read_txt_file("text/clan/clan_invitations")
await clan_msg.edit_text(
text=msg_text,
reply_markup=keyboard
)
except exceptions.MessageNotModified:
pass
@dp.callback_query_handler(regexp=ClanRegexp.invitation)
async def clan_invitation_handler(callback: types.CallbackQuery, state: FSMContext):
data = await state.get_data()
user_id = callback.from_user.id
clan_msg: types.Message = data.get("clan_msg")
if data.get("user_id") != user_id:
msg_text = read_txt_file("text/hints/foreign_button")
return await callback.answer(msg_text)
invitation = re.findall(r"open_invitation_(\d+)", callback.data)
accept_invitation = re.findall(r"accept_invitation_(\d+)", callback.data)
cancel_invitation = re.findall(r"cancel_invitation_(\d+)", callback.data)
session = db_api.CreateSession()
if invitation:
invitation_id = int(invitation[0])
clan_invitation_table: tables.ClanInvitation = session.db.query(
tables.ClanInvitation).filter_by(id=invitation_id).join(tables.Clan).first()
keyboard = kb_constructor.StandardKeyboard(
user_id=user_id).create_invitation_keyboard(invitation_id=invitation_id)
await clan_msg.edit_text(
text="{}\n\n"
"Рейтинг: \n"
"Лидер: \n"
"Участников: \n"
"Осталось: ".format(clan_invitation_table.clan.name),
reply_markup=keyboard
)
elif accept_invitation:
invitation_id = int(accept_invitation[0])
clan_invitation_table: tables.ClanInvitation = session.db.query(
tables.ClanInvitation).filter_by(id=invitation_id).join(tables.Clan).first()
if clan_invitation_table.clan.creator == user_id:
rank = "Лидер"
else:
rank = "Рекрут"
new_clan_member = tables.ClanMember(
clan_id=clan_invitation_table.clan_id,
user_id=user_id,
rank=rank,
contest_score=0,
clan_units=0,
units_donate=0,
donate_timer=0
)
session.db.add(new_clan_member)
session.db.query(tables.ClanInvitation).filter_by(
id=invitation_id).delete()
session.db.commit()
await callback.answer("Вы вступили в клан!")
keyboard = kb_constructor.PaginationKeyboard(
user_id=user_id).create_invitation_keyboard()
msg_text = read_txt_file("text/clan/clan_invitations")
await clan_msg.edit_text(
text=msg_text,
reply_markup=keyboard
)
elif cancel_invitation:
invitation_id = int(cancel_invitation[0])
session.db.query(tables.ClanInvitation).filter_by(
id=invitation_id).delete()
session.db.commit()
keyboard = kb_constructor.PaginationKeyboard(
user_id=user_id).create_invitation_keyboard()
msg_text = read_txt_file("text/clan/clan_invitations")
await clan_msg.edit_text(
text=msg_text,
reply_markup=keyboard
)
await callback.answer()
session.close()
@dp.callback_query_handler(regexp=ClanRegexp.get_clan_units)
async def clan_getting_units_handler(callback: types.CallbackQuery, state: FSMContext):
data = await state.get_data()
user_id = callback.from_user.id
if data.get("user_id") != user_id:
msg_text = read_txt_file("text/hints/foreign_button")
return await callback.answer(msg_text)
session = db_api.CreateSession()
townhall: tables.TownHall = session.db.query(
tables.TownHall).filter_by(user_id=user_id).first()
clan_member: tables.ClanMember = session.db.query(
tables.ClanMember).filter_by(user_id=user_id).first()
if callback.data == "get_clan_units":
if clan_member.donate_timer != 0:
await callback.answer("Станет доступно позже.")
session.close()
return
msg_text = read_txt_file("text/clan/get_clan_units")
keyboard = kb_constructor.StandardKeyboard(
user_id=user_id).create_get_clan_units_keyboard()
sticker = read_txt_file("sticker/get_clan_units")
await callback.message.answer_sticker(sticker=sticker)
await callback.message.answer(
text=msg_text.format(townhall.country_name, callback.from_user.get_mention()),
reply_markup=keyboard
)
clan_member.donate_timer = timer.Timer.set_timer(28800)
session.db.commit()
await callback.answer()
session.close()
@dp.callback_query_handler(regexp=ClanRegexp.member)
async def clan_members_handler(callback: types.CallbackQuery, state: FSMContext):
data = await state.get_data()
user_id = callback.from_user.id
clan_msg: types.Message = data.get("clan_msg")
clan_members_msg: types.Message = data.get("clan_members_msg")
if data.get("user_id") != user_id:
msg_text = read_txt_file("text/hints/foreign_button")
return await callback.answer(msg_text)
session = db_api.CreateSession()
clan_member: tables.ClanMember = session.db.query(
tables.ClanMember).filter_by(user_id=user_id).first()
clan_member_id = re.findall(r"check_clan_member_(\d+)", callback.data)
raise_member = re.findall(r"raise_clan_member_(\d+)", callback.data)
kick_member = re.findall(r"kick_clan_member_(\d+)", callback.data)
if clan_member_id:
member_id = int(clan_member_id[0])
checked_clan_member: tables.ClanMember = session.db.query(
tables.ClanMember).filter_by(id=member_id).join(tables.User).first()
keyboard = kb_constructor.StandardKeyboard(
user_id=user_id).create_member_keyboard(member_id, clan_member)
msg_text = read_txt_file("text/clan/member")
await clan_msg.edit_text(
text=msg_text.format(
checked_clan_member.user.first_name,
checked_clan_member.contest_score,
checked_clan_member.rank,
),
reply_markup=keyboard
)
elif raise_member:
member_id = int(raise_member[0])
checked_clan_member: tables.ClanMember = session.db.query(
tables.ClanMember).filter_by(id=member_id).first()
if clan_member.rank in ("Заместитель", "Лидер"):
if checked_clan_member.rank == "Рекрут":
checked_clan_member.rank = "Старейшина"
elif checked_clan_member.rank == "Старейшина":
checked_clan_member.rank = "Заместитель"
session.db.commit()
keyboard = kb_constructor.PaginationKeyboard(
user_id=user_id).create_members_keyboard()
await clan_msg.edit_text(
text=clan_members_msg.html_text,
reply_markup=keyboard
)
elif kick_member:
member_id = int(kick_member[0])
checked_clan_member: tables.ClanMember = session.db.query(
tables.ClanMember).filter_by(id=member_id).first()
if clan_member.rank in ("Заместитель", "Лидер"):
if checked_clan_member.rank != "Лидер" and clan_member.rank != checked_clan_member.rank:
session.db.query(
tables.ClanMember).filter_by(id=member_id).delete()
session.db.commit()
keyboard = kb_constructor.PaginationKeyboard(
user_id=user_id).create_members_keyboard()
await clan_msg.edit_text(
text=clan_members_msg.html_text,
reply_markup=keyboard
)
elif callback.data == "leave_clan":
await clan_msg.edit_text(
text="Вы уверены,\n что хотите покинуть клан?",
reply_markup=keyboards.clan.kb_leave_clan
)
elif callback.data == "yes_leave_clan":
session.db.query(
tables.ClanMember).filter_by(user_id=user_id).delete()
session.db.commit()
await clan_msg.edit_text(
text="Клан",
reply_markup=keyboards.clan.kb_none_clan
)
elif callback.data == "no_leave_clan":
await clan_msg.edit_text(
text=clan_msg.html_text,
reply_markup=clan_msg.reply_markup
)
await callback.answer()
session.close()
@dp.callback_query_handler(regexp=ClanRegexp.contest)
async def clan_contest_handler(callback: types.CallbackQuery, state: FSMContext):
data = await state.get_data()
user_id = callback.from_user.id
clan_msg: types.Message = data.get("clan_msg")
if data.get("user_id") != user_id:
msg_text = read_txt_file("text/hints/foreign_button")
return await callback.answer(msg_text)
session = db_api.CreateSession()
clan_member: tables.ClanMember = session.db.query(
tables.ClanMember).filter_by(user_id=user_id).join(tables.Clan).first()
if callback.data == "start_search_contest":
msg_text = read_txt_file("text/clan/search_contest")
await clan_msg.edit_text(
text=msg_text,
reply_markup=keyboards.clan.kb_cancel_contest
)
clan_member.clan.state = "search"
elif callback.data == "cancel_search_contest":
msg_text = read_txt_file("text/clan/none_contest")
await clan_msg.edit_text(
text=msg_text,
reply_markup=keyboards.clan.kb_search_contest
)
clan_member.clan.state = None
await callback.answer()
session.close()
@dp.message_handler(filters.IsReplyFilter(True), state=states.Clan.set_name)
async def set_clan_name(message: types.Message, state: FSMContext):
data = await state.get_data()
if message.reply_to_message.message_id == data.get("message_id"):
clan_name = message.text[:16]
clan_msg = await message.reply(
text="Придумайте <b>описание</b> для \n"
"клана. (макс. 21 символ)\n"
"<i>Ответив на это сообщение.</i>"
)
await state.update_data({
"clan_name": clan_name,
"message_id": clan_msg.message_id
})
await states.Clan.set_description.set()
@dp.message_handler(filters.IsReplyFilter(True), state=states.Clan.set_description)
async def set_clan_description(message: types.Message, state: FSMContext):
data = await state.get_data()
user_id = message.from_user.id
if message.reply_to_message.message_id == data.get("message_id"):
clan_description = message.text[:21]
await state.update_data({
"clan_description": clan_description,
})
keyboard = kb_constructor.StandardKeyboard(
user_id=user_id).create_emoji_clan_keyboard()
set_emoji_msg = await message.answer(
text="Выберите <b>логотип</b> вашего клана.",
reply_markup=keyboard
)
await state.update_data({
"set_emoji_msg": set_emoji_msg
})
await states.Clan.set_emoji.set()
@dp.callback_query_handler(state=states.Clan.set_emoji)
async def set_clan_emoji(callback: types.CallbackQuery, state: FSMContext):
data = await state.get_data()
user_id = callback.from_user.id
emojis = [
"❤", "🔥", "💖", "🍩", "🌶", "💩",
"💧", "🌈", "🌞", "🌻", "🌹", "☠",
"🥀", "🦄", "🐙", "🎃", "👾", "🔱"
]
session = db_api.CreateSession()
if callback.data in emojis:
set_emoji_msg: types.Message = data.get("set_emoji_msg")
await set_emoji_msg.delete()
clan_name = data.get("clan_name")
clan_description = data.get("clan_description")
clan_emoji = callback.data
new_clan = tables.Clan(
name=clan_name,
description=clan_description,
emoji=clan_emoji,
rating=0,
money=0,
units=0,
creator=user_id
)
session.db.add(new_clan)
session.db.commit()
clan: tables.Clan = session.db.query(
tables.Clan).filter_by(creator=user_id).first()
if clan is not None:
new_clan_member = tables.ClanMember(
clan_id=clan.clan_id,
user_id=user_id,
clan_units=0,
rank="Лидер"
)
session.db.add(new_clan_member)
session.db.commit()
clan_member: tables.ClanMember = session.db.query(
tables.ClanMember).filter_by(user_id=user_id).first()
clan_members: typing.List[tables.ClanMember] = session.db.query(
tables.ClanMember).filter_by(clan_id=clan_member.clan_id).all()
clan_creator: tables.User = session.db.query(
tables.User).filter_by(user_id=clan_member.clan.creator).first()
keyboard = kb_constructor.StandardKeyboard(
user_id=user_id).create_clan_keyboard()
msg_text = read_txt_file("text/clan/in_clan")
clan_msg = await callback.message.answer(
text=msg_text.format(
clan.emoji,
clan.name,
clan.description[:21],
clan.rating,
len(clan_members),
clan_creator.first_name,
clan_member.rank),
reply_markup=keyboard
)
await state.reset_state(with_data=True)
await state.set_data({
"user_id": user_id,
"clan_msg": clan_msg
})
session.close()
@dp.message_handler(filters.IsReplyFilter(True), regexp=ClanRegexp.invite, state="*")
async def invite_user_clan_handler(message: types.Message):
user_id = message.from_user.id
replied_user = message.reply_to_message.from_user
session = db_api.CreateSession()
clan_member_table: tables.ClanMember = session.filter_by_user_id(
user_id=user_id, table=tables.ClanMember
)
if clan_member_table is None:
return
if clan_member_table.rank in ("Лидер", "Заместитель", "Старейшина"):
invited_clan_member_table: tables.ClanMember = session.filter_by_user_id(
user_id=replied_user.id, table=tables.ClanMember
)
invited_user_table: tables.User = session.filter_by_user_id(
user_id=replied_user.id, table=tables.User)
invited_clan_invitation_table: tables.ClanInvitation = session.db.query(
tables.ClanInvitation).filter_by(
clan_id=clan_member_table.clan_id, user_id=replied_user.id).first()
if invited_user_table is None or invited_clan_invitation_table is not None:
return
if (invited_clan_member_table is None) and (not replied_user.is_bot):
time_set = timer.Timer.set_timer(86400)
new_invitation = tables.ClanInvitation(
user_id=replied_user.id,
clan_id=clan_member_table.clan_id,
timer=time_set
)
session.db.add(new_invitation)
session.close()
| 2.171875 | 2 |
trojsten/settings/wiki.py | MvonK/web | 5 | 12793734 | from trojsten.settings.production import *
SITE_ID = 6
NAVBAR_SITES = [1, 3, 4, 5]
ROOT_URLCONF = "trojsten.urls.wiki"
| 1.265625 | 1 |
bubblegum/backend/mpl/contour.py | danielballan/xray-vision | 4 | 12793735 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from .. import QtCore, QtGui
from . import AbstractMPLDataView
from .. import AbstractDataView2D
import logging
logger = logging.getLogger(__name__)
class ContourView(AbstractDataView2D, AbstractMPLDataView):
"""
The ContourView provides a UI widget for viewing a number of 1-D
data sets as a contour plot, starting from dataset 0 at y = 0
"""
def __init__(self, fig, data_list=None, cmap=None, norm=None, *args,
**kwargs):
"""
__init__ docstring
Parameters
----------
fig : figure to draw the artists on
x_data : list
list of vectors of x-coordinates
y_data : list
list of vectors of y-coordinates
lbls : list
list of the names of each data set
cmap : colormap that matplotlib understands
norm : mpl.colors.Normalize
"""
# set some defaults
# no defaults yet
# call the parent constructors
super(ContourView, self).__init__(data_list=data_list, fig=fig,
cmap=cmap, norm=norm, *args,
**kwargs)
# create the matplotlib axes
self._ax = self._fig.add_subplot(1, 1, 1)
self._ax.set_aspect('equal')
# plot the data
self.replot()
def replot(self):
"""
Override
Replot the data after modifying a display parameter (e.g.,
offset or autoscaling) or adding new data
"""
# TODO: This class was originally written to convert a 1-D stack into a
# 2-D contour. Rewrite this replot method
# get the keys from the dict
keys = list(six.iterkeys(self._data))
# number of datasets in the data dict
num_keys = len(keys)
# cannot plot data if there are no keys
if num_keys < 1:
return
# set the local counter
counter = num_keys - 1
# @tacaswell Should it be required that all datasets are the same
# length?
num_coords = len(self._data[keys[0]][0])
# declare the array
self._data_arr = np.zeros((num_keys, num_coords))
# add the data to the main axes
for key in self._data.keys():
# get the (x,y) data from the dictionary
(x, y) = self._data[key]
# add the data to the array
self._data_arr[counter] = y
# decrement the counter
counter -= 1
# get the first dataset to get the x axis and number of y datasets
x, y = self._data[keys[0]]
y = np.arange(len(keys))
# TODO: Colormap initialization is not working properly.
self._ax.contourf(x, y, self._data_arr) # , cmap=colors.Colormap(self._cmap))
| 0.855469 | 1 |
utilities/GraphLoss.py | adik0861/RetinaNet | 0 | 12793736 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
import re
import io
loss = Path.cwd().parent.joinpath('savefiles', 'checkpoints', 'loss.log').read_text()
loss = re.sub(r'[\]\[]', '', loss)
df = pd.read_csv(io.StringIO(loss), names=['epoch', 'iteration', 'cls_loss', 'box_loss', 'run_loss'])
def avg_loss(period):
_df = df.groupby(df.index // period).mean()
x = np.array(list(_df.index))
y_cls = np.array(_df['cls_loss'].to_list())
y_box = np.array(_df['box_loss'].to_list())
    plt.plot(x, y_cls, x, y_box)  # plot classification and box losses against the same x axis
plt.show()
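# Example invocation (the 50-iteration averaging window is an arbitrary illustrative choice):
# avg_loss(50)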
| 2.671875 | 3 |
uam_simulator/agent.py | colineRamee/UAM_simulator_scitech2021 | 1 | 12793737 | import math
import numpy as np
from uam_simulator import my_utils
from uam_simulator import pathPlanning
from uam_simulator import orca
import gurobipy as grb
from gurobipy import GRB
import time as python_time
class Flightplan:
def __init__(self, t0, dt, positions, times=None):
self.start_time = t0
self.positions = positions
self.time_step = dt
self.times = None
if times is not None:
self.time_step = None
self.times = np.array(times)
else:
self.times = np.array([self.start_time + i * self.time_step for i in range(0, len(self.positions))])
if self.time_step is not None:
self.end_time = self.start_time + (len(self.positions) - 1) * self.time_step
else:
self.end_time=self.times[-1]
def get_planned_position_at(self, time, return_velocity=False, ignore_timed_out=False, debug=False):
""" Interpolates between the flight points """
if ignore_timed_out and time > self.end_time:
if return_velocity:
return None, None
else:
return None
n = len(self.positions)
if self.time_step is not None:
idx_float = (float(time) - float(self.start_time)) / float(self.time_step)
idx_low = min(math.floor(idx_float), n - 1)
idx_high = min(math.ceil(idx_float), n - 1)
if idx_low == idx_high:
# Indices are equal because idx_float is an int
if return_velocity:
if idx_high == n-1:
velocity = np.array([0, 0])
else:
velocity = (self.positions[idx_high+1]-self.positions[idx_high])/(self.times[idx_high+1]-self.times[idx_high])
return self.positions[idx_high], velocity
else:
return self.positions[idx_high]
else:
if time > self.times[-1]:
return np.copy(self.positions[-1])
idx_high = np.searchsorted(self.times, time)
idx_low = max(0, idx_high - 1)
if self.times[idx_high] == time or idx_low == idx_high:
if return_velocity:
if idx_high == n-1:
velocity = np.array([0, 0])
else:
velocity = (self.positions[idx_high+1]-self.positions[idx_high])/(self.times[idx_high+1]-self.times[idx_high])
return self.positions[idx_high], velocity
else:
return self.positions[idx_high]
idx_float = idx_low + (time - self.times[idx_low]) / (self.times[idx_high] - self.times[idx_low])
pos_high = self.positions[idx_high]
pos_low = self.positions[idx_low] # if time is exactly integer then returns the exact pos
if debug:
print(idx_float)
print(pos_high)
print(pos_low)
if return_velocity:
return pos_low + (pos_high - pos_low) * (idx_float - idx_low), (pos_high-pos_low)/(self.times[idx_high]-self.times[idx_low])
else:
return pos_low + (pos_high - pos_low) * (idx_float - idx_low)
def get_planned_trajectory_between(self, start_time, end_time, debug=False):
""" Returns trajectory between start_time and end_time"""
if (start_time - self.end_time) >= -1e-4 or (end_time - self.start_time) <= 1e-4:
return None, None
trajectory_end_time = min(end_time, self.end_time)
trajectory_start_time = max(start_time, self.start_time)
trajectory = []
times = []
if debug:
print('time step is '+str(self.time_step))
print('start_time is '+str(start_time))
print('end_time '+str(end_time))
print('positions '+str(self.positions))
print('times '+str(self.times))
print(start_time-self.end_time)
if self.time_step is None:
# self.times is sorted
[start_index, end_index] = np.searchsorted(self.times, [trajectory_start_time, trajectory_end_time])
temp = self.times[start_index]
if abs(self.times[start_index]-trajectory_start_time) > 1e-4:
# requires interpolation
# Since we already now the index we could avoid a second call to search sorted
trajectory.append(self.get_planned_position_at(trajectory_start_time))
times.append(trajectory_start_time)
for i in range(start_index, end_index):
trajectory.append(self.positions[i])
times.append(self.times[i])
# trajectory_end_time <= times[end_index]
if abs(self.times[end_index]-trajectory_end_time) > 1e-4:
# requires interpolation
trajectory.append(self.get_planned_position_at(trajectory_end_time))
times.append(trajectory_end_time)
else:
trajectory.append(self.positions[end_index])
times.append(trajectory_end_time)
else:
start_index_float = float((trajectory_start_time - self.start_time) / self.time_step)
end_index_float = float((trajectory_end_time - self.start_time) / self.time_step)
lower = math.ceil(start_index_float)
upper = min(math.floor(end_index_float), len(self.positions) - 1)
if lower != start_index_float:
pos_0 = self.get_planned_position_at(start_time)
trajectory.append(np.copy(pos_0))
times.append(trajectory_start_time)
for index in range(lower, upper + 1):
trajectory.append(self.positions[index])
# times.append(self.start_time+index*self.time_step)
times.append(self.times[index])
if upper != end_index_float:
pos_end = self.get_planned_position_at(end_time)
trajectory.append(pos_end)
times.append(trajectory_end_time)
return trajectory, times
def get_end_time(self):
if self.time_step is not None:
return self.start_time + (len(self.positions) - 1) * self.time_step
else:
return self.times[-1]
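# A minimal usage sketch of Flightplan (illustrative values only):
#
#   plan = Flightplan(t0=0.0, dt=1.0, positions=[np.array([0.0, 0.0]),
#                                                np.array([10.0, 0.0]),
#                                                np.array([20.0, 0.0])])
#   plan.get_planned_position_at(0.5)   # -> array([5., 0.]) by linear interpolation
#   plan.get_end_time()                 # -> 2.0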
class Agent:
def __init__(self, env, radius, max_speed, start=None, end=None, start_time=0, agent_logic='dumb',
centralized_manager=None, algo_type=None, agent_dynamics=None, id=0, sensing_radius=10000,
flight_leg='initial'):
self.id = id
self.environment = env
self.centralized_manager = centralized_manager
self.agent_dynamics = agent_dynamics
if agent_logic == 'dumb':
protected_area = self.environment.get_protected_area()
else:
# All other agents can wait in place
protected_area = None
# Can't have random start and not random end (or vice versa)
if start is None or end is None:
self.start, self.goal = self.environment.get_random_start_and_end(protected_area_start=protected_area)
if np.linalg.norm(self.start - self.goal) < 10:
# Play one more time
# print('agent start and goal are close, redrawing at random')
self.start, self.goal = self.environment.get_random_start_and_end(protected_area_start=protected_area)
if np.linalg.norm(self.start - self.goal) < 10:
print('unlikely, agent start and goal are still close')
else:
self.start = start
self.goal = end
self.position = np.copy(self.start) # Passed by reference
self.new_position = np.copy(self.start)
self.radius = radius
self.orientation = 0
self.minSpeed = 0.0
self.maxSpeed = max_speed
self.sensing_radius = sensing_radius
self.desired_start_time = start_time
self.start_time = start_time # actual start time if a ground delay is planned
if np.linalg.norm(self.goal - self.start) == 0:
print(agent_logic)
print(start)
print(end)
print(np.linalg.norm(self.goal - self.start))
self.velocity = self.maxSpeed * (self.goal - self.start) / (np.linalg.norm(self.goal - self.start))
self.new_velocity = self.velocity
self.trajectory = []
self.trajectory_times = []
self.collision_avoidance_time = []
self.preflight_time = None
self.flightPlan = None
self.status = 'ok'
self.agent_logic = agent_logic
self.tolerance = self.environment.tolerance
self.t_removed_from_sim=None
if agent_logic == 'dumb':
self.ownship = False
else:
self.ownship = True
self.flight_status = 'initialized'
self.algo_type = algo_type
self.cumulative_density=0
self.density=0
self.n_steps=0
self.flight_leg=flight_leg
def get_predicted_end_time(self):
if self.flightPlan is not None:
return self.flightPlan.end_time
else:
print('Agent: in order to get the predicted end time a flight plan must exist')
return self.start_time
def compute_next_move(self, current_time, dt, debug=False, density=0):
""" Store the next position in self.new_position. The position is updated when move is called """
if self.agent_logic == 'dumb':
self.new_position = self.compute_straight_move(self.position, self.goal, self.maxSpeed, dt)
self.new_velocity = (self.new_position - self.position) / dt
if self.agent_logic == 'reactive':
self.cumulative_density += density
self.n_steps += 1
if self.algo_type is None:
self.algo_type = 'MVP'
self.new_velocity = self.collision_avoidance(dt, algo_type=self.algo_type)
self.new_velocity = self.velocity_update(self.new_velocity)
self.new_position += self.new_velocity * dt
if self.agent_logic == 'strategic':
# Follow flight plan (without consideration for kinematic properties)
self.new_position = self.flightPlan.get_planned_position_at(current_time + dt, debug=debug)
self.new_velocity = (self.new_position - self.position) / dt
if debug:
print('New position ' + str(self.new_position))
print('old position ' + str(self.position))
if self.trajectory == []:
self.trajectory.append(np.copy(self.position))
self.trajectory_times.append(current_time)
self.trajectory.append(np.copy(self.new_position))
self.trajectory_times.append(current_time + dt)
self.flight_status = 'ongoing'
def compute_straight_move(self, current_position, goal, speed, dt):
orientation = math.atan2(goal[1] - current_position[1], goal[0] - current_position[0])
d = np.linalg.norm(goal - current_position)
max_step_length = min(speed * dt, d) # slow down to arrive at the goal on the next time step
return current_position + np.array([math.cos(orientation), math.sin(orientation)]) * max_step_length
def move(self):
self.position = np.copy(self.new_position)
self.velocity = np.copy(self.new_velocity)
return self.position
def velocity_update(self, new_velocity):
# Introduce kinematic constraints
# For now just clamp the velocity and instantly change the orientation
v = np.linalg.norm(new_velocity)
v_clamped = my_utils.clamp(self.minSpeed, self.maxSpeed, v)
if self.agent_dynamics is None:
return new_velocity * v_clamped / v
else:
turn_angle = my_utils.get_angle(self.velocity, new_velocity)
max_angle=30*math.pi/180
if abs(turn_angle)>max_angle:
vel = self.velocity * v_clamped / np.linalg.norm(self.velocity)
theta=math.copysign(max_angle,turn_angle)
return vel @ np.asarray([[math.cos(theta), math.sin(theta)], [-math.sin(theta), math.cos(theta)]])
else:
return new_velocity * v_clamped / v
def preflight(self, dt, algo_type='Straight', density=0):
# Given, the start/goals and published flight plans of other agents find a free path and publish it
self.density = density
if self.centralized_manager is None:
print('agent.py preflight error, a centralized manager must exist')
if algo_type == 'Straight':
timer_start = python_time.time()
plan = []
plan.append(self.start)
pos = np.copy(self.start)
d = np.linalg.norm(self.goal - pos)
# Larger time steps require larger tolerance
# TODO tolerances are a bit of a mess
while d > self.maxSpeed * dt:
pos = self.compute_straight_move(pos, self.goal, self.maxSpeed, dt)
d = np.linalg.norm(self.goal - pos)
plan.append(pos)
if d != 0:
plan.append(self.goal)
self.flightPlan = Flightplan(self.start_time, dt, plan)
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
if algo_type == 'LocalVO':
timer_start = python_time.time()
local_planner = pathPlanning.Local_VO(self.start, self.goal, self.start_time, self.maxSpeed, self.centralized_manager, self.tolerance)
success, plan, times = local_planner.search()
if not success:
self.flight_status = 'cancelled'
return None
self.start_time = times[0]
## Debug
if len(times) < 2:
print('the plan is too short')
print('agent start '+ str(self.start))
print('agent goal '+str(self.goal))
print('agent plan pos '+str(plan))
print('agent plan times ' + str(times))
self.flightPlan = Flightplan(times[0], times[1] - times[0], plan, times=np.array(times))
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
if algo_type == 'Decoupled':
timer_start = python_time.time()
decoupled_planner = pathPlanning.DecoupledApproach(self.start, self.goal, self.start_time, self.maxSpeed,
self.centralized_manager, self.tolerance)
success, plan, times = decoupled_planner.search()
if not success:
self.flight_status = 'cancelled'
return None
self.start_time = times[0]
self.flightPlan = Flightplan(times[0], times[1] - times[0], plan, times=np.array(times))
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
if algo_type == 'SIPP':
timer_start = python_time.time()
sipp_planner = pathPlanning.SIPP(self.start, self.goal, self.start_time, self.maxSpeed,
self.centralized_manager, self.tolerance)
success, plan, times = sipp_planner.search()
if not success:
self.flight_status = 'cancelled'
return None
self.start_time = times[0]
self.flightPlan = Flightplan(times[0], times[1] - times[0], plan, times=np.array(times))
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
if algo_type == 'A_star_8':
timer_start = python_time.time()
astar_planner = pathPlanning.AStar_8grid(self.start, self.goal, self.start_time, self.maxSpeed,
self.centralized_manager)
success, plan, times = astar_planner.search()
if not success:
self.flight_status = 'cancelled'
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return None
self.start_time = times[0]
self.flightPlan = Flightplan(times[0], times[1] - times[0], plan, times=np.array(times))
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
else:
print('The algo type ' + algo_type + ' is not implemented')
def can_safely_take_off(self, t):
if self.algo_type == 'straight':
return True
neighbors = self.environment.get_neighbors(self.position, self.radius)
for vehicle in neighbors:
if t >= vehicle.start_time and vehicle.id != self.id:
# if np.linalg.norm(self.position - vehicle.position) <= self.radius:
self.flight_status = 'waiting'
return False
self.start_time = t
return True
def collision_avoidance(self, dt, algo_type='MVP'):
# Given current position, next flight plan goal and surrounding vehicles decide where to go
# Based on Hoekstra Bluesky simulator
# The returned velocity might not be feasible
if algo_type == 'MVP_Bluesky':
timer_start = python_time.time()
neighbors = self.get_neighbors()
velocity_change = np.asarray([0.0, 0.0])
direction = self.goal - self.position
d = np.linalg.norm(direction)
desired_velocity = min(self.maxSpeed, d / dt) * direction / d
safety_factor = 1.10 # 10% safety factor (as in The effects of Swarming on a Voltage Potential-Based Conflict Resolution Algorithm, <NAME>)
# if d<=self.radius:
# dV=0
# else:
for neighbor in neighbors:
# Find Time of Closest Approach
delta_pos = self.position - neighbor.position
dist=np.linalg.norm(delta_pos)
delta_vel = desired_velocity - neighbor.velocity
if np.linalg.norm(delta_vel)==0:
t_cpa=0
else:
t_cpa=-np.dot(delta_pos, delta_vel) / np.dot(delta_vel, delta_vel)
dcpa = delta_pos+delta_vel*t_cpa
dabsH = np.linalg.norm(dcpa)
# If there is a conflict
if dabsH < self.radius:
# If head-on conflict
if dabsH<=10:
dabsH=10
dcpa[0] = delta_pos[1] / dist * dabsH
dcpa[1] = -delta_pos[0] / dist * dabsH
if self.radius*safety_factor < dist:
erratum = np.cos(np.arcsin((self.radius*safety_factor) / dist) - np.arcsin(dabsH / dist))
dV =(((self.radius*safety_factor) / erratum - dabsH) * dcpa)/(abs(t_cpa)*dabsH)
else:
# If already moving away from conflict (tcpa is negative) then just keep going
if t_cpa<=0:
dV = 0
else:
dV =(self.radius*safety_factor - dabsH)*dcpa/(abs(t_cpa)*dabsH)
velocity_change += dV
timer_end = python_time.time()
self.collision_avoidance_time.append(timer_end - timer_start)
return desired_velocity + velocity_change
elif algo_type == 'VO':
timer_start = python_time.time()
intruders = self.get_neighbors()
d = np.linalg.norm(self.goal - self.position)
speed = min(d / dt, self.maxSpeed)
if d == 0:
print('VO, this should not happen')
print('distance to goal is 0')
desired_velocity = (self.goal - self.position) * speed / d
model = setupMIQCP(intruders, desired_velocity, self)
model.optimize()
if model.status != GRB.Status.OPTIMAL:
print('Error gurobi failed to find a solution')
print(model.status)
vars = model.getVars()
if intruders != []:
# plotter([-1000,1000],[-1000,1000],100,[get_VO(intruders[0],self)],chosen_v=np.array([vars[0].x,vars[1].x]))
pass
timer_end = python_time.time()
self.collision_avoidance_time.append(timer_end - timer_start)
return np.array([vars[0].x, vars[1].x])
elif algo_type == 'ORCA':
timer_start = python_time.time()
reactive_solver = orca.ORCA()
vel=reactive_solver.compute_new_velocity(self, dt)
timer_end = python_time.time()
self.collision_avoidance_time.append(timer_end - timer_start)
return vel
elif algo_type == 'straight':
timer_start = python_time.time()
d = np.linalg.norm(self.goal - self.position)
speed = min(d / dt, self.maxSpeed)
desired_velocity = (self.goal - self.position) * speed / d
timer_end = python_time.time()
self.collision_avoidance_time.append(timer_end - timer_start)
return desired_velocity
else:
print(algo_type+' not implemented ')
def get_neighbors(self):
neighbors = self.environment.get_neighbors(self.position, self.sensing_radius)
if neighbors == []:
return []
else:
return neighbors[neighbors != self]
def get_nearest_neighbors(self, k, max_radius):
# Will return itself so query one more neighbor
neighbors = self.environment.get_nearest_neighbors(self.position, k+1, max_radius)
if neighbors == []:
return []
else:
return neighbors[neighbors != self]
def finish_flight(self, t,goal_pos=None, t_removed_from_sim=None):
self.flight_status = 'finished'
self.arrival_time = t
self.t_removed_from_sim=t_removed_from_sim
if goal_pos is not None:
self.trajectory.append(np.copy(goal_pos))
self.trajectory_times.append(t)
def log_agent(self):
agent_log = {'flight_status': self.flight_status,
'agent_type': self.agent_logic,
'desired_time_of_departure': self.desired_start_time,
'agent_id':self.id}
if self.flight_status== 'finished' or self.flight_status == 'ongoing':
agent_log['actual_time_of_departure'] = self.start_time
if self.flight_status == 'finished':
ideal_length = float(np.linalg.norm(self.goal - self.start))
actual_length = 0
if self.trajectory == []:
print('agent, empty trajectory ') # happens if start and goal are really close
print(self.start)
print(self.goal)
pos_0 = self.trajectory[0]
for pos in self.trajectory:
d = np.linalg.norm(pos - pos_0)
actual_length += d
pos_0 = np.copy(pos)
direction = self.goal - self.start
heading = math.atan2(direction[1], direction[0])
if self.agent_logic == 'reactive':
self.density = self.cumulative_density/self.n_steps - 1
agent_log['flight_status']= self.flight_status
agent_log['agent_type']= self.agent_logic
agent_log['length_ideal']= ideal_length
agent_log['actual_length']= actual_length
agent_log['ideal_time_of_arrival']= self.desired_start_time+ideal_length / self.maxSpeed
agent_log['actual_time_of_arrival']= self.arrival_time
if self.t_removed_from_sim is not None:
agent_log['time_removed_from_sim']=self.t_removed_from_sim
agent_log['heading']= heading
agent_log['density']= self.density
if self.agent_logic == 'strategic':
agent_log['time_to_preflight'] = self.preflight_time
elif self.agent_logic == 'reactive':
agent_log['average_time_to_plan_avoidance'] = sum(self.collision_avoidance_time) / len(self.collision_avoidance_time)
agent_log['total_planning_time'] = sum(self.collision_avoidance_time)
return agent_log
def get_VO(intruder_agent, ownship_agent):
if intruder_agent == ownship_agent:
print('get_VO this should not happen intruder and ownship are the same')
rel_pos = intruder_agent.position - ownship_agent.position
d = np.linalg.norm(rel_pos)
if d == 0:
print('the distance between the two agents is 0')
if ownship_agent.radius > d:
print('there is an intruder in the protected radius')
print(ownship_agent.position)
print(intruder_agent.position)
alpha = math.asin(ownship_agent.radius / d) # VO cone half-angle (>=0)
theta = math.atan2(rel_pos[1], rel_pos[0])
vector1 = [math.cos(theta + alpha), math.sin(theta + alpha)]
vector2 = [math.cos(theta - alpha), math.sin(theta - alpha)]
# must be greater
normal_1 = np.array([vector1[1], -vector1[0]]) # Rotated +90 degrees
constraint1 = lambda x, y: np.dot((np.array([x, y]) - intruder_agent.velocity) + 0.1 * normal_1, normal_1)
# must be smaller
normal_2 = np.array([-vector2[1], vector2[0]]) # Rotated -90 degrees
constraint2 = lambda x, y: np.dot((np.array([x, y]) - intruder_agent.velocity) + 0.1 * normal_2, normal_2)
return constraint1, constraint2
def setupMIQCP(intruders, desired_vel, ownship_agent):
""" Intruders should be an array of agents """
model = grb.Model('VO')
max_vel = ownship_agent.maxSpeed
model.addVar(lb=-max_vel, ub=max_vel, name='x')
model.addVar(lb=-max_vel, ub=max_vel, name='y')
model.addVars(2 * len(intruders), vtype=GRB.BINARY)
model.update()
X = model.getVars()
n_intruder = 0
for intruder in intruders:
constraints_or = get_VO(intruder, ownship_agent)
n_constraint = 0
for constraint in constraints_or:
c = constraint(0, 0)
a = constraint(1, 0) - c
b = constraint(0, 1) - c
# K must be arbitrarily large so that when the binary constraint is 1 the constraint is always respected
K = abs(a * max_vel) + abs(b * max_vel) + c
model.addConstr(a * X[0] + b * X[1] - K * X[2 + 2 * n_intruder + n_constraint] <= -c)
n_constraint += 1
model.addConstr(X[2 + 2 * n_intruder] + X[2 + 2 * n_intruder + 1] <= 1)
n_intruder += 1
model.addConstr(X[0] * X[0] + X[1] * X[1] <= max_vel ** 2)
model.setObjective(
(X[0] - desired_vel[0]) * (X[0] - desired_vel[0]) + (X[1] - desired_vel[1]) * (X[1] - desired_vel[1]),
GRB.MINIMIZE)
model.setParam("OutputFlag", 0)
model.setParam("FeasibilityTol", 1e-9)
model.update()
return model | 2.796875 | 3 |
authentik/stages/user_delete/apps.py | BeryJu/passbook | 15 | 12793738 | <reponame>BeryJu/passbook
"""authentik delete stage app config"""
from django.apps import AppConfig
class AuthentikStageUserDeleteConfig(AppConfig):
"""authentik delete stage config"""
name = "authentik.stages.user_delete"
label = "authentik_stages_user_delete"
verbose_name = "authentik Stages.User Delete"
| 1.5625 | 2 |
analyze.py | satamame/pscscrape | 0 | 12793739 | # %%
import os
import matplotlib.pyplot as plt
# %% Settings
datasets = ['hariko/scripts',]
# %% "「" を含む行数をカウントする関数
def count_kakko(f_path):
    '''Get the number of lines and the number of lines containing "「"
    parameters
    ----------
    f_path : str
        Path of the file to inspect
    returns
    -------
    lines : int
        Number of lines in the file
    lines_with_kakko : int
        Number of lines containing "「"
    '''
lines = 0
lines_with_kakko = 0
with open(f_path, encoding='utf-8') as f:
while True:
l = f.readline()
if not l:
break
if '「' in l:
lines_with_kakko += 1
lines += 1
return (lines, lines_with_kakko)
# %% Inspect every file in the datasets
params = []
for set_dir in datasets:
files = os.listdir(path=set_dir)
for f in files:
f_path = os.path.abspath(os.path.join(set_dir, f))
if os.path.isfile(f_path):
params.append(count_kakko(f_path))
# %% Visualize
(x, y) = zip(*params)
plt.scatter(x, y)
# %% Show y as a ratio rather than a raw line count
y = [y/x for (x, y) in params]
plt.scatter(x, y, alpha=0.3)
| 2.40625 | 2 |
source/cognidron/interfaces/eeg/eegInterfaz.py | dregmli/cognidron | 1 | 12793740 | <gh_stars>1-10
# This library is used so that the class can be treated as an interface in OOP
# more info at: https://www.python-course.eu/python3_abstract_classes.php
from abc import ABC, abstractmethod
class EegInterfaz(ABC):
"""
    Abstract interface that must be implemented to connect the cognidron with the EEG device.
"""
conectado = False
    # indicates whether a connection to the EEG device has been established
def __init__(self):
pass
@abstractmethod
def iniciarConexion(self):
pass
@abstractmethod
def cerrarConexion(self):
pass
@abstractmethod
def enviarMensaje(self, mensaje):
pass
@abstractmethod
def recibirMensaje(self):
return ""
| 3.328125 | 3 |
tpDcc/tools/unittests/core/toolset.py | tpDcc/tpDcc-tools-unittest | 0 | 12793741 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Toolset implementation for tpDcc-tools-unittests
"""
from __future__ import print_function, division, absolute_import
from tpDcc.libs.qt.widgets import toolset
class UnitTestsToolsetWidget(toolset.ToolsetWidget, object):
def __init__(self, *args, **kwargs):
self._unit_test_paths = kwargs.get('unit_test_paths', list())
self._unit_test_paths.extend([r'D:\tpDcc\tpDcc-libs-nameit\tests'])
super(UnitTestsToolsetWidget, self).__init__(*args, **kwargs)
def contents(self):
from tpDcc.tools.unittests.core import model, view, controller
unit_test_model = model.UnitTestModel()
unit_test_controller = controller.UnitTestController(client=self._client, model=unit_test_model)
unit_test_view = view.UnitTestView(
unit_test_paths=self._unit_test_paths, model=unit_test_model, controller=unit_test_controller, parent=self)
return [unit_test_view]
| 2.015625 | 2 |
examples/example_GenericSignal.py | ishine/acoular | 294 | 12793742 | # -*- coding: utf-8 -*-
"""
Example that feeds a GenericSignalGenerator from mixed multichannel time data.
"""
from pylab import *
from acoular import *
# files
datafile = 'example_data.h5'
t1 = MaskedTimeSamples(name=datafile)
t1.start = 0 # first sample, default
t1.stop = 16000 # last valid sample = 15999
invalid = [1,7] # list of invalid channels (unwanted microphones etc.)
t1.invalid_channels = invalid
t2 = ChannelMixer(source=t1)
sig = GenericSignalGenerator(source=t2)
plot(sig.signal())
show()
| 2.25 | 2 |
gcn/subsample.py | floregol/gcn | 1 | 12793743 | import random
import time
import numpy as np
import copy
from itertools import compress
random.seed(123)
# Remove columns from the adjacency matrix.
# TODO: needs additional scaling?
# Be careful not to modify the initial complete support matrix
def get_sub_sampled_support(complete_support, node_to_keep):
index_array = complete_support[0][:] # make a copy to avoid modifying complete support
values = np.zeros(complete_support[1].shape)
index_array_sorted = index_array[:, 1].argsort()
j = 0
node_to_keep.sort()
for index_to_keep in node_to_keep:
while (j < len(index_array_sorted) and index_to_keep >= index_array[index_array_sorted[j]][1]):
if (index_to_keep == index_array[index_array_sorted[j]][1]):
values[index_array_sorted[j]] = complete_support[1][index_array_sorted[j]]
j += 1
sub_sampled_support = (index_array, values, complete_support[2])
return sub_sampled_support
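# Illustrative sketch (not part of the original module). The support tuple appears to follow
# the (indices, values, dense_shape) convention used above, e.g.:
#   complete_support = (np.array([[0, 0], [0, 1], [1, 1]]), np.array([1.0, 0.5, 1.0]), (2, 2))
#   sub_support = get_sub_sampled_support(complete_support, node_to_keep=[1])
# keeps only the values whose column index is in node_to_keep (here, column 1).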
# Return a train mask covering label_percent of the training set.
# If maintain_label_balance, keep the smallest number of labels per class in the training set that respects label_percent, except for 100 %
def get_train_mask(label_percent, y_train, initial_train_mask, maintain_label_balance=False):
train_index = np.argwhere(initial_train_mask).reshape(-1)
train_mask = np.zeros((initial_train_mask.shape), dtype=bool) # list of False
if maintain_label_balance:
ones_index = []
for i in range(y_train.shape[1]): # find the ones for each class
ones_index.append(train_index[np.argwhere(y_train[train_index, i] > 0).reshape(-1)])
if label_percent < 100:
smaller_num = min(
int(len(l) * (label_percent / 100))
for l in ones_index) # find smaller number of ones per class that respect the % constraint
for ones in ones_index:
random_index = random.sample(list(ones), smaller_num)
train_mask[random_index] = True # set the same number of ones for each class, so the set is balanced
else:
for ones in ones_index:
train_mask[ones] = True
else:
random_sampling_set_size = int((label_percent / 100) * train_index.shape[0])
random_list = random.sample(list(train_index), random_sampling_set_size)
train_mask[random_list] = True
label_percent = (100 * np.sum(train_mask) / train_index.shape[0])
return train_mask, label_percent
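# Illustrative sketch (not part of the original module): with a one-hot y_train and a boolean
# initial_train_mask, e.g.
#   train_mask, kept_percent = get_train_mask(50, y_train, initial_train_mask, maintain_label_balance=True)
# keeps roughly half of the training labels while balancing the classes.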
# Returns a random list of indexes of the nodes to keep.
def get_random_percent(num_nodes, percent):
if percent > 100:
print("This is not how percentage works.")
exit()
random_sampling_set_size = int((percent * num_nodes) / 100)
return random.sample(range(num_nodes), random_sampling_set_size)
# Returns a list of indexes for the mask
def get_list_from_mask(mask):
return list(compress(range(len(mask)), mask))
# Set features of nodes that shouldn't be in the set to extreme values to make sure they do not leak into the GCNN
def modify_features_that_shouldnt_change_anything(features, note_to_keep):
note_doesnt_exist = [x for x in range(features[2][0]) if x not in note_to_keep]
a = np.where(np.isin(features[0][:, 0], note_doesnt_exist))
features[1][a[0]] = 10000000
| 2.28125 | 2 |
utils/add_entry.py | capitanu/DD2477-project | 0 | 12793744 | <gh_stars>0
#!/usr/bin/env python
from elasticsearch import Elasticsearch
es = Elasticsearch()
entry = {
"word" : "the",
"list": [
{
"docID" : "3",
"offsetlist" : ["3", "5", "14", "15"]
},
{
"docID" : "14",
"offsetlist" : ["2", "8", "27", "108"]
}
],
}
es.delete(index="summary-index", id="8babHoABsMW6PCgq_YhM")
es.delete(index="summary-index", id="8LaaHoABsMW6PCgqW4iw")
es.index(index="summary-index", body=entry)
| 2.578125 | 3 |
features/steps/fizz_buzz_steps.py | lazaromer97/fizz-buzz-test | 1 | 12793745 | <gh_stars>1-10
from behave import given, when, then
def fizz_buzz(since: int, until: int) -> dict:
data = {}
for num in range(since, until+1):
if num % 15 == 0:
data[str(num)] = 'FizzBuzz'
elif num % 5 == 0:
data[str(num)] = 'Buzz'
elif num % 3 == 0:
data[str(num)] = 'Fizz'
else:
data[str(num)] = num
return data
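# For example (illustrative only, not part of the original steps file):
#   fizz_buzz(1, 5) == {'1': 1, '2': 2, '3': 'Fizz', '4': 4, '5': 'Buzz'}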
@then(u'the fizz_buzz(1, 3) test run')
def test_1_3(context):
assert fizz_buzz(1, 3) == {'1':1, '2':2, '3':'Fizz'}
@then(u'the fizz_buzz(4, 6) test run')
def test_4_6(context):
assert fizz_buzz(4, 6) == {'4':4, '5':'Buzz', '6':'Fizz'}
@then(u'the fizz_buzz(14, 16) test run')
def test_14_16(context):
assert fizz_buzz(14, 16) == {'14':14, '15':'FizzBuzz', '16':16}
| 2.90625 | 3 |
openspeech/datasets/librispeech/preprocess/subword.py | CanYouImagine/openspeech | 207 | 12793746 | # MIT License
#
# Copyright (c) 2021 <NAME> and <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sentencepiece as spm
import shutil
from typing import Tuple
from openspeech.datasets.librispeech.preprocess.preprocess import collect_transcripts
SENTENCEPIECE_MODEL_NAME = "sp"
def _prepare_tokenizer(train_transcripts, vocab_size):
    """ Prepare sentencepiece tokenizer """
input_file = 'spm_input.txt'
model_type = 'unigram'
with open(input_file, 'w') as f:
for transcript in train_transcripts:
f.write(f"{transcript.split('|')[-1]}\n")
spm.SentencePieceTrainer.Train(f"--input={input_file} "
f"--model_prefix={SENTENCEPIECE_MODEL_NAME} "
f"--vocab_size={vocab_size} "
f"--model_type={model_type} "
f"--pad_id=0 "
f"--bos_id=1 "
f"--eos_id=2 "
f"--unk_id=3 "
f"--user_defined_symbols=<blank>")
def generate_manifest_files(dataset_path: str, manifest_file_path: str, vocab_path: str, vocab_size: int) -> None:
"""
Generate manifest files.
Format: {audio_path}\t{transcript}\t{numerical_label}
Args:
        dataset_path (str): path to the LibriSpeech dataset root
        manifest_file_path (str): path of the manifest file to write
        vocab_path (str): directory where the sentencepiece model and vocab files are saved
        vocab_size (int): size of subword vocab
Returns:
None
"""
transcripts_collection = collect_transcripts(dataset_path)
_prepare_tokenizer(transcripts_collection[0], vocab_size)
shutil.copy(f"{SENTENCEPIECE_MODEL_NAME}.model", os.path.join(vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.model"))
shutil.copy(f"{SENTENCEPIECE_MODEL_NAME}.vocab", os.path.join(vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.vocab"))
sp = spm.SentencePieceProcessor()
sp.Load(os.path.join(vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.model"))
with open(manifest_file_path, 'w') as f:
for idx, part in enumerate(['train-960', 'dev-clean', 'dev-other', 'test-clean', 'test-other']):
for transcript in transcripts_collection[idx]:
audio_path, transcript = transcript.split('|')
text = " ".join(sp.EncodeAsPieces(transcript))
label = " ".join([str(item) for item in sp.EncodeAsIds(transcript)])
f.write(f"{audio_path}\t{text}\t{label}\n")
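# Usage sketch (hypothetical paths, illustrative only):
#   generate_manifest_files(dataset_path='/data/LibriSpeech', manifest_file_path='manifest.tsv',
#                           vocab_path='vocab/', vocab_size=5000)
# would train the sentencepiece model, copy it into vocab_path, and write one
# "{audio_path}\t{subword text}\t{subword ids}" line per transcript.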
| 2.09375 | 2 |
appg/views/admin_list_examples.py | poxstone/appg | 0 | 12793747 | <filename>appg/views/admin_list_examples.py
# -*- coding: utf-8 -*-
from flask.views import View
from flask import flash, redirect, url_for, jsonify, render_template, request
import json
from google.appengine.api import users
from google.appengine.runtime.apiproxy_errors import CapabilityDisabledError
from forms import ExampleForm
from models import ExampleModel
from decorators import login_required
from utils import to_dict
class PIni(View):
@login_required
def dispatch_request(self):
examples = ExampleModel.query()
return render_template('ini.html', examples=examples )
class PIni_list(View):
#@login_required
def dispatch_request(self):
list = ExampleModel.query().fetch()
list = to_dict(list)
return jsonify(results = list)
class PIni_delete(View):
@login_required
def dispatch_request(self):
if request.method == "POST":
form = request.get_json()
example_id = form['example_id']
if example_id:
example = ExampleModel.get_by_id( int(example_id) )
if example:
try:
example.key.delete()
return jsonify(result = {'status':'ok'})
except CapabilityDisabledError:
return jsonify(result = {'status':'Error to save'})
else:
return jsonify(result = {'status':'NOT item found'})
else:
return jsonify(result = {'status':'no example_example send'})
else:
return jsonify(result = {'status':'NOT is correct method'})
class PIni_put(View):
@login_required
def dispatch_request(self):
if request.method == "POST":
form = request.get_json()
example = ExampleModel(
example_name = form['example_name'],
example_description = form['example_description'],
added_by = users.get_current_user()
)
try:
example.put()
example_id = example.key.id()
return jsonify(result = {'status':'ok'})
except CapabilityDisabledError:
return jsonify(result = {'status':'DONT cant save'})
else:
return jsonify(result = {'status':'NOT is correct method'})
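# Illustrative request body accepted by PIni_put (hypothetical values, not part of the original view):
#   {"example_name": "demo", "example_description": "created through the JSON endpoint"}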
class PIni_update(View):
@login_required
def dispatch_request(self):
if request.method == "POST":
form = request.get_json()
example_id = form['example_id']
if example_id:
example = ExampleModel.get_by_id( int(example_id) )
if example:
try:
example.example_name = form['example_name']
example.example_description = form['example_description']
example.put()
return jsonify(result = {'status':'ok'})
except CapabilityDisabledError:
return jsonify(result = {'status':'Error to update'})
else:
return jsonify(result = {'status':'NOT item found'})
else:
return jsonify(result = {'status':'no example_example send'})
else:
return jsonify(result = {'status':'NOT is correct method'})
| 2.1875 | 2 |
tests/auth/test_auth_models.py | ethanaggor/twitter-clone | 0 | 12793748 | <reponame>ethanaggor/twitter-clone
"""Tests for `app.auth.models.User` class."""
import pytest
from werkzeug.exceptions import HTTPException
from app import db
from app.auth.models import User
from tests.conftest import CleanTestingMixin
class TestUserExistence(CleanTestingMixin):
"""Test the existence of our `User` class."""
def test_model_exists(self):
"""Does our model exist?"""
assert User.__table__ is not None
def test_model_write(self, app):
"""Can our model be used to write data to the DB?"""
with app.app_context():
new_user = User(
username='Test',
email='<EMAIL>',
password='',
)
db.session.add(new_user)
db.session.commit()
extracted_user = User.query.first()
assert extracted_user is not None
assert extracted_user.username == 'Test'
assert extracted_user.email == '<EMAIL>'
class TestUserFields(CleanTestingMixin):
"""Test the fields on the `User` class."""
@pytest.fixture()
def columns(self):
"""All columns on the `User` table."""
return list(User.__table__.columns)
@pytest.fixture()
def column_keys(self, columns):
"""All keys for the columns on the `User` table."""
return list(map(lambda c: c.key, columns))
def test_model_id(self, columns, column_keys):
"""Does our model have our specified `id` field?"""
column = columns[column_keys.index('id')]
assert 'id' in column_keys
assert isinstance(column.type, db.Integer)
assert column.primary_key
assert column.autoincrement
def test_model_username(self, columns, column_keys):
"""Does our model have our specified `username` field?"""
column = columns[column_keys.index('username')]
assert 'username' in column_keys
assert isinstance(column.type, db.String)
assert column.type.length == 15
assert column.unique
"""
def test_model_phone(self, columns, column_keys):
\"\"\"Does our model have our specified `phone` field?\"\"\"
column = columns[column_keys.index('phone')]
assert 'phone' in column_keys
assert isinstance(column.type, db.String)
assert column.type.length == 30
assert column.unique
"""
def test_model_email(self, columns, column_keys):
"""Does our model have our specified `email` field?"""
column = columns[column_keys.index('email')]
assert 'email' in column_keys
assert isinstance(column.type, db.String)
assert column.type.length == 100
assert column.unique
def test_model_password(self, columns, column_keys):
"""Does our model have our specified `password` field?"""
column = columns[column_keys.index('password')]
assert 'password' in column_keys
assert isinstance(column.type, db.String)
assert column.type.length == 256
class TestUserHelpers(CleanTestingMixin):
"""Test the helper methods for the `User` class."""
def test_model_create(self, app):
"""Does our static method `User.create()` store information in the DB?"""
with app.app_context():
User.create(name='tester',
username='testing',
email='<EMAIL>',
password='<PASSWORD>'
)
user = User.query.first()
assert user is not None
assert user.name == 'tester'
assert user.username == 'testing'
assert user.email == '<EMAIL>'
assert user.password != '<PASSWORD>'
def test_model_pwd_hash(self, app):
"""Does our static method `User.create()` use bcrypt to hash the password?"""
with app.app_context():
user = User.query.first()
assert user is not None
# This is the current bcrypt algorithm signature (Dec. 2020)
            assert user.password[0:4] == '$2b$'
def test_model_authenticate(self, app):
"""Does our static method `User.authenticate()` retrieve an existing user given a correct username/PW combo?"""
with app.app_context():
user = User.query.first()
att_user = User.authenticate('testing', '<PASSWORD>')
assert att_user is not None
assert user.id == att_user.id
assert user.username == att_user.username
assert user.password == att_user.password
def test_model_unauth(self, app):
"""Does our static method `User.authenticate()` fail properly when given an invalid username/PW combo?"""
with app.app_context():
# Non existent username:
att_user = User.authenticate('asdf', 'asdf')
assert att_user is None
# Existing username but bad password:
att_user = User.authenticate('testing', 'asdf')
assert att_user is None
# Correct password but non existing username:
att_user = User.authenticate('asdf', '<PASSWORD>')
assert att_user is None
def test_model_get_by_username(self, app):
"""Does our static method `User.get_by_username_or_404` retrieve an existing user given a correct username?"""
with app.app_context():
user = User.query.first()
testing_user = User.get_by_username_or_404('testing')
assert testing_user == user
def test_model_get_by_username_fail(self, app):
"""Does our static method `User.get_by_username_or_404` correctly 404 given a non-existing username?"""
with app.app_context():
try:
User.get_by_username_or_404('asdf')
except HTTPException as e:
assert e.response is None
assert e.description == "Resource not found."
| 2.796875 | 3 |
openmdao/examples/subproblem_example.py | naylor-b/OpenMDAO1 | 17 | 12793749 |
import sys
from math import pi
from openmdao.api import Problem, Group, Component, IndepVarComp, ExecComp, \
ScipyOptimizer, SubProblem, CaseDriver
class MultiMinGroup(Group):
"""
In the range -pi <= x <= pi
function has 2 local minima, one is global
global min is: f(x) = -1.31415926 at x = pi
local min at: f(x) = -0.69084489952 at x = -3.041593
"""
def __init__(self):
super(MultiMinGroup, self).__init__()
self.add('indep', IndepVarComp('x', 0.0))
self.add("comp", ExecComp("fx = cos(x)-x/10."))
self.connect("indep.x", "comp.x")
def main(num_par_doe):
# First, define a Problem to be able to optimize our function.
sub = Problem(root=MultiMinGroup())
# set up our SLSQP optimizer
sub.driver = subdriver = ScipyOptimizer()
subdriver.options['optimizer'] = 'SLSQP'
subdriver.options['disp'] = False # disable optimizer output
# In this case, our design variable is indep.x, which happens
# to be connected to the x parameter on our 'comp' component.
subdriver.add_desvar("indep.x", lower=-pi, upper=pi)
# We are minimizing comp.fx, so that's our objective.
subdriver.add_objective("comp.fx")
# Now, create our top level problem
prob = Problem(root=Group())
prob.root.add("top_indep", IndepVarComp('x', 0.0))
# add our subproblem. Note that 'indep.x' is actually an unknown
# inside of the subproblem, but outside of the subproblem we're treating
# it as a parameter.
prob.root.add("subprob", SubProblem(sub, params=['indep.x'],
unknowns=['comp.fx']))
prob.root.connect("top_indep.x", "subprob.indep.x")
# use a CaseDriver as our top level driver so we can run multiple
# separate optimizations concurrently. We'll run 'num_par_doe'
# concurrent cases. In this case we need no more than 2 because
# we're only running 2 total cases.
prob.driver = CaseDriver(num_par_doe=num_par_doe)
prob.driver.add_desvar('top_indep.x')
prob.driver.add_response(['subprob.indep.x', 'subprob.comp.fx'])
# these are the two cases we're going to run. The top_indep.x values of
# -1 and 1 will end up at the local and global minima when we run the
# concurrent subproblem optimizers.
prob.driver.cases = [
[('top_indep.x', -1.0)],
[('top_indep.x', 1.0)]
]
prob.setup(check=False)
# run the concurrent optimizations
prob.run()
# collect responses for all of our input cases
optvals = [dict(resp) for resp, success, msg in prob.driver.get_responses()]
# find the minimum value of subprob.comp.fx in our responses
global_opt = sorted(optvals, key=lambda x: x['subprob.comp.fx'])[0]
return global_opt
if __name__ == '__main__':
global_opt = main(2)
print("\nGlobal optimum:\n subprob.comp.fx = %s at subprob.indep.x = %s" %
(global_opt['subprob.comp.fx'], global_opt['subprob.indep.x']))
| 3.09375 | 3 |
modules/utils/br_feat_orc_creator.py | guilhermemg/trace-links-tc-br | 0 | 12793750 | # Utility functions to create the expert and volunteer oracles from the taskruns dataset
import pandas as pd
from modules.utils import aux_functions
from modules.utils import firefox_dataset_p2 as fd
class Br_Feat_Oracle_Creator:
def __init__(self, bugreports, features):
self.bugreports = bugreports
self.features = features
def __shift_taskruns_answers(self, taskruns):
new_answers = list(taskruns.answers.values)
new_answers = [new_answers[-1]] + new_answers
del new_answers[-1]
taskruns['new_answers'] = new_answers
return taskruns
def __create_exp_feat_br_matrix(self, expert_taskruns):
taskruns_expert = self.__shift_taskruns_answers(expert_taskruns)
taskruns_expert.sort_values(by='bug_id', inplace=True)
taskruns_expert = taskruns_expert[(taskruns_expert.bug_id != 1181835) & (taskruns_expert.bug_id != 1315514)] # drop taskrun lost during empirical study
feat_br_matrix = pd.DataFrame(columns=self.features.feat_name.values,
index=self.bugreports.Bug_Number)
feat_br_matrix.index.names = ['bug_number']
for idx,row in taskruns_expert.iterrows():
ans = row.new_answers.split(" ")
for i in range(len(ans)-2): # -2 ==> dropped features from branch 65
feat_name = feat_br_matrix.columns[i]
feat_br_matrix.at[row.bug_id, feat_name] = int(ans[i])
return feat_br_matrix
def create_br_feat_expert_matrix(self, expert_taskruns):
feat_br_matrix = self.__create_exp_feat_br_matrix(expert_taskruns)
fd.Feat_BR_Oracles.write_feat_br_expert_df(feat_br_matrix)
def create_br_feat_expert_2_matrix(self, expert_taskruns):
feat_br_matrix = self.__create_exp_feat_br_matrix(expert_taskruns)
fd.Feat_BR_Oracles.write_feat_br_expert_2_df(feat_br_matrix)
def create_br_feat_volunteers_matrix(self, taskruns_volunteers_1, taskruns_volunteers_2):
ignored_taskruns = [154, 155, 156, 157, 169, 170, 171, 172, 183, 184, 196,
197, 198, 199, 200, 201, 202, 203, 204, 206, 241, 242,
253, 264, 265, 266, 267, 268, 269, 270]
taskruns_volunteers_1 = self.__shift_taskruns_answers(taskruns_volunteers_1)
taskruns_volunteers_2 = self.__shift_taskruns_answers(taskruns_volunteers_2)
taskruns = pd.concat([taskruns_volunteers_1, taskruns_volunteers_2])
taskruns.sort_values(by='bug_id', inplace=True)
taskruns = taskruns[(taskruns.bug_id != 1181835) & (taskruns.bug_id != 1315514)] # drop taskrun lost during empirical study
not_ignored_taskruns = [t_id for t_id in taskruns.id.values if t_id not in ignored_taskruns]
taskruns = taskruns[taskruns.id.isin(not_ignored_taskruns)]
feat_br_matrix = pd.DataFrame(columns=self.features.feat_name.values,
index=self.bugreports.Bug_Number)
feat_br_matrix.index.names = ['bug_number']
for idx,row in taskruns.iterrows():
ans = row.new_answers.split(" ")
for i in range(len(ans)-2): # -2 ==> dropped features from branch 65
feat_name = feat_br_matrix.columns[i]
feat_br_matrix.at[row.bug_id, feat_name] = int(ans[i])
fd.Feat_BR_Oracles.write_feat_br_volunteers_df(feat_br_matrix)
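# Usage sketch (hypothetical DataFrames, illustrative only; assumes bugreports has a Bug_Number
# column and features has a feat_name column, as the methods above expect):
#   creator = Br_Feat_Oracle_Creator(bugreports_df, features_df)
#   creator.create_br_feat_expert_matrix(expert_taskruns_df)
#   creator.create_br_feat_volunteers_matrix(taskruns_vol_1_df, taskruns_vol_2_df)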
| 2.515625 | 3 |