filename | text
---|---|
the-stack_0_20261 | from .render import (
render_choice,
)
from ..models.model import get_track
EXAMPLE_COMMAND = "info"
def handle_command(command, event, bot):
"""
Executes bot command if the command is known
"""
print('slack::cmd::{}'.format(command))
success, response = True, None
cmd_list = command.split(' ')
cmd = cmd_list[0].lower()
    args = cmd_list[1:] if len(cmd_list) > 1 else []
if cmd == 'help':
response, success = handle_command_help()
if cmd == 'genres':
response, success = handle_command_genres(args, event, bot)
if cmd == 'songs':
response, success = handle_command_songs(args, event, bot)
if cmd == 'map':
response, success = handle_command_map(args, event, bot)
if cmd == 'self':
response, success = handle_command_self(args, event, bot)
if 'reaction_' in cmd:
response, success = handle_command_reaction(args, event, bot)
print('slack::cmd::{}::success::{}'.format(command, success))
return success, response
def handle_command_help():
return """
Hi there, I'm a music curation agent
Here's what I can do:
\t*info*: this screen
\t*accounts*: list account balances
\t*genres*: list genres
\t*genres* add <#genre>: add #genre to the list of genres
\t*songs*: list songs
\t*songs* add _<spotifyURI>_: add song to the list of songs by _<spotifyURI>_
\t*songs* get _<spotifyURI>_: get song with _<spotifyURI>_
\t*map* _<spotifyURI>_ <#genre>: map a song to a genre
Not sure? try *songs* or *genres*
""", True
def handle_command_genres(args, event, bot, limit=3):
if len(args) == 0:
return handle_command_list_genres(args, bot, limit)
elif args[0] == 'add':
return handle_command_add_genre(args, event, bot)
return None, False
def handle_command_list_genres(args, bot, limit=3):
genres = bot.sorted_genres
text = """
{} genres loaded, here are the latest {}:
""".format(len(genres), limit)
attachments = [
genre.render(bot)
for genre in genres[::-1][:limit]
]
return {
'text': text,
'attachments': attachments
}, True
def handle_command_add_genre(args, event, bot):
tx = bot.put('genre', event)
bot.pull(tx_id=tx['id'])
bot.store['active']['genre'] = -1
attachments = [
bot.active_genre.render(bot),
render_choice(['propose'], bot)
]
text = "Genre {} added".format(bot.active_genre.value)
return {
'text': text,
'attachments': attachments
}, True
def handle_command_songs(args, event, bot, limit=10):
if len(args) == 0:
return handle_command_list_songs(args, bot, limit)
elif args[0] == 'add':
return handle_command_add_song(args, event, bot)
elif args[0] == 'get':
return handle_command_get_song(args, event, bot)
return None, False
def handle_command_list_songs(args, bot, limit):
songs = bot.sorted_songs
text = """
{} songs loaded, here are the latest {}:
""".format(len(songs), limit)
attachments = [
song.render(bot, size='small')
for song in songs[::-1][:limit]
]
return {
'text': text,
'attachments': attachments
}, True
def handle_command_add_song(args, event, bot):
track_uri = args[-1][1:-1]
try:
event['metadata'] = get_track(bot.connections['spotify'], track_uri)
except Exception as e:
event['metadata'] = {}
print(e)
event['metadata']['uri'] = track_uri
tx = bot.put('song', event)
bot.pull(tx_id=tx['id'])
bot.store['active']['song'] = -1
attachments = [
bot.active_song.render(bot),
# render_choice(['propose'], bot)
]
text = "Song {} added".format(bot.active_song.title)
return {
'text': text,
'attachments': attachments
}, True
def handle_command_get_song(args, event, bot):
song = None
song_id = args[1]
if 'spotify:track' in song_id:
song = get_song_by_uri(bot, song_id[1:-1])
else:
try:
song = bot.store['songs'][song_id]
except Exception as e:
print(e)
if not song:
handle_command_add_song([song_id], event, bot)
song = get_song_by_uri(bot, song_id[1:-1])
bot.pull(tx_id=song.id)
song = get_song_by_uri(bot, song.uri)
attachments = [
song.render(bot),
# render_choice(['withdraw', 'challenge'], bot)
]
text = "Song *{} - {}*".format(song.artist, song.title)
return {
'text': text,
'attachments': attachments
}, True
def handle_command_map(args, event, bot):
handle_command_get_song(['get', *args], event, bot)
song = get_song_by_uri(bot, args[0][1:-1])
genre = get_genre_by_name(bot, args[1])
if not genre:
handle_command_add_genre([], event, bot)
genre = get_genre_by_name(bot, args[1])
event['map'] = genre.value
bot.pull(tx_id=song.id)
bot.put('map', event, song.recent)
bot.pull(tx_id=song.id)
attachments = [
bot.active_song.render(bot),
# render_choice(['withdraw', 'challenge'], bot)
]
text = "Song *{} - {}* mapped to genre *{}*".format(song.artist, song.title, genre.value)
return {
'text': text,
'attachments': attachments
}, True
def handle_command_self(args, event, bot):
if 'attachments' in event \
and len(event['attachments']) == 1 \
and event['text'][:4] == 'Song':
bot.connections['slack'].api_call(
"reactions.add",
channel=event['channel'],
name="thumbsup",
timestamp=event['ts']
)
bot.connections['slack'].api_call(
"reactions.add",
channel=event['channel'],
name="thumbsdown",
timestamp=event['ts']
)
bot.store['messages'][event['ts']] = event
return None, True
def handle_command_reaction(args, event, bot):
msg = bot.store['messages'].get(event['item']['ts'])
if msg:
song = get_song_by_uri(bot, msg['attachments'][0]['author_name'])
bot.pull(tx_id=song.id)
event['reaction'] = args[0]
bot.put('reaction', event, song.recent)
bot.pull(tx_id=song.id)
text = "Reaction *{}* added to song *{} - {}*".format(args[0], song.artist, song.title)
return {
'text': text,
'attachments': []
}, True
return None, True
def get_song_by_uri(bot, uri):
song = [s for s in bot.store['songs'].values() if uri in s.uri]
if len(song) > 0:
return song[0]
return None
def get_genre_by_name(bot, genre):
genres = [g for g in bot.store['genres'].values() if genre == g.value]
if len(genres) > 0:
return genres[0]
return None
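# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how handle_command might be driven from an incoming
# Slack message event. The `event` and `bot` objects are hypothetical
# stand-ins for whatever the surrounding bot framework provides; only the
# fields actually read by the handlers above (text, channel, ts, ...) matter.
def example_dispatch(event, bot):
    text = event.get('text', '')
    success, response = handle_command(text, event, bot)
    if success and response:
        # response is a dict with 'text' and 'attachments', as returned above
        bot.connections['slack'].api_call(
            "chat.postMessage",
            channel=event['channel'],
            text=response['text'],
            attachments=response['attachments'],
        )
    return success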
|
the-stack_0_20262 | from logging import getLogger
from huey import crontab
from huey.contrib.djhuey import periodic_task
from sales.models import ItemSale
logger = getLogger('django.server')
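# The schedules below use huey's crontab syntax: minute='0', hour='*/12' fires
# at minute 0 of every 12th hour (00:00 and 12:00), while minute='*' fires
# every minute.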
@periodic_task(crontab(minute='0', hour='*/12'))
def close_completed_item_sales():
item_sales = ItemSale.objects.filter(platform_paid=True, sale_finalized=True)
for sale in item_sales:
logger.info(f'[INFO] Deleting item #{sale.item.id} and all accompanying bids, sales, meta, etc.')
sale.item.delete()
@periodic_task(crontab(minute='*'))
def close_cancelled_item_sales():
item_sales = ItemSale.objects.filter(sale_cancelled=True, payment_refunded=True)
for sale in item_sales:
logger.info(f'[INFO] Deleting sale #{sale.id}.')
sale.delete() |
the-stack_0_20265 | arr = []
print('Enter the number of array elements, then the elements themselves')
for a in range(0, int(input())):
# t = int(input())
arr.append(int(input()))
# my_list.sort()
print('arr =', arr, 'type: ', type(arr))  # list check
def findSmaller(arr):
smallest = arr[0]
smallest_index = 0
for i in range(1, len(arr)):
if arr[i] < smallest:
smallest = arr[i]
smallest_index = i
return smallest_index
def selectionSort(arr):
newArr = []
for i in range(len(arr)):
smallest = findSmaller(arr)
newArr.append(arr.pop(smallest))
return newArr
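# Example: selectionSort([5, 3, 6, 2, 10]) returns [2, 3, 5, 6, 10].
# Note that selectionSort empties the list passed to it (it uses arr.pop),
# and the algorithm runs in O(n^2) time.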
print('smaller_index:', findSmaller(arr))
print('sorted list:', selectionSort(arr))
|
the-stack_0_20266 | #!/usr/bin/python
#
# 2019 Graham R Pugh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""See docstring for LastRecipeRunChecker class"""
import json
import os.path
from autopkglib import Processor, ProcessorError # pylint: disable=import-error
__all__ = ["LastRecipeRunChecker"]
class LastRecipeRunChecker(Processor):
"""An AutoPkg pre-processor which reads the output from the LastRecipeRunResult processor from
a different AutoPkg recipe, so that they can be used in the foillowing processes."""
input_variables = {
"recipeoverride_identifier": {
"description": "The identifier of the recipe from which the information is required.",
"required": True,
},
"cache_dir": {
"description": "Path to the cache dir.",
"required": False,
"default": "~/Library/AutoPkg/Cache",
},
"info_file": {
"description": ("Name of input file."),
"required": False,
"default": "latest_version.json",
},
}
output_variables = {
"url": {"description": ("the download URL.")},
"version": {"description": ("The current package version.")},
"license_key": {"description": ("The outputted value for license_key.")},
"pkg_path": {"description": ("the package path.")},
"pkg_name": {"description": ("the package name.")},
"pkg_uploaded": {
"description": ("whether a package was uploaded on the last run or not.")
},
"pkg_metadata_updated": {
"description": (
"whether package metadata was updated on the last run or not."
)
},
"PKG_CATEGORY": {"description": ("The package category.")},
"LAST_RUN_POLICY_NAME": {"description": ("The policy_name.")},
"LAST_RUN_SELFSERVICE_DESCRIPTION": {
"description": ("The self-service description.")
},
}
description = __doc__
def get_latest_recipe_run_info(self, cache_dir, identifier, info_file):
"""get information from the output files of a LastRecipeRunResult processor"""
try:
info_filepath = os.path.join(cache_dir, identifier, info_file)
with open(info_filepath, "r") as fp:
data = json.load(fp)
except (IOError, ValueError):
raise ProcessorError("No package or version information found")
else:
return data
def main(self):
identifier = self.env.get("recipeoverride_identifier")
cache_dir = os.path.expanduser(self.env.get("cache_dir"))
info_file = self.env.get("info_file")
# make sure all the values were obtained from the file
data = self.get_latest_recipe_run_info(cache_dir, identifier, info_file)
self.env["version"] = data.get("version")
self.env["license_key"] = data.get("license_key")
self.env["pkg_name"] = data.get("pkg_name")
self.env["pkg_uploaded"] = data.get("pkg_uploaded")
self.env["pkg_metadata_updated"] = data.get("pkg_metadata_updated")
if not self.env["version"] or not self.env["pkg_name"]:
raise ProcessorError("No package or version information found")
self.env["pkg_path"] = data["pkg_path"]
self.env["url"] = data.get("url")
self.env["PKG_CATEGORY"] = data.get("category")
self.env["LAST_RUN_POLICY_NAME"] = data.get("policy_name")
self.env["LAST_RUN_SELFSERVICE_DESCRIPTION"] = data.get(
"self_service_description"
)
# make sure the package actually exists
if not os.path.exists(self.env["pkg_path"]):
raise ProcessorError(
"Package does not exist: {}".format(self.env["pkg_path"])
)
self.output(f"Package name: {data['pkg_name']}")
self.output(f"Package path: {data['pkg_path']}")
self.output(f"Version: {data['version']}")
self.output(f"URL: {data['url']}")
self.output(f"Pkg category: {data['category']}")
self.output(f"Policy name: {data['policy_name']}")
self.output(f"Self Service Description: {data['self_service_description']}")
self.output(f"License Key: {data['license_key']}")
self.output(f"Package uploaded: {data['pkg_uploaded']}")
self.output(f"Package metadata updated: {data['pkg_metadata_updated']}")
if __name__ == "__main__":
PROCESSOR = LastRecipeRunChecker()
PROCESSOR.execute_shell()
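# Illustrative shape of the info_file ("latest_version.json") that this
# processor expects, inferred from the keys read in main() above; the values
# shown are placeholders, not real output:
#
# {
#     "version": "1.2.3",
#     "license_key": "XXXX-XXXX",
#     "pkg_name": "Example-1.2.3.pkg",
#     "pkg_path": "/path/to/Example-1.2.3.pkg",
#     "pkg_uploaded": true,
#     "pkg_metadata_updated": false,
#     "url": "https://example.com/downloads/Example-1.2.3.dmg",
#     "category": "Productivity",
#     "policy_name": "Install Example",
#     "self_service_description": "Installs Example."
# }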
|
the-stack_0_20267 | # Copyright © 2020 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Dharmendra G Patel <[email protected]>
#
"""Test DB Communicator."""
import os
import json
import pytest
import unittest
from unittest.mock import patch
from bayesian.utility.db_gateway import (GraphAnalyses, RdbAnalyses, RDBSaveException,
RDBInvalidRequestException)
from sqlalchemy.exc import SQLAlchemyError
class GraphAnalysesTest(unittest.TestCase):
"""Test Communicator."""
@classmethod
def setUpClass(cls):
"""Class variables initialised."""
cls.eco = 'eco'
cls.ver = '1'
cls.pkg = 'pkg'
        # Read gremlin batch data and the CA batch response from JSON.
gremlin_batch_data = os.path.join('/bayesian/tests/data/gremlin/gremlin_batch_data.json')
ca_batch_response = os.path.join('/bayesian/tests/data/response/ca_batch_response.json')
with open(ca_batch_response) as f:
cls.batch_response = json.load(f)
with open(gremlin_batch_data) as f:
cls.gremlin_batch = json.load(f)
        # Read the Snyk component analyses response from JSON.
rest_json_path2 = os.path.join(
os.path.dirname(__file__),
'..',
'data/gremlin/snyk_component_analyses_response.json')
with open(rest_json_path2) as f:
resp_json = json.load(f)
cls.resp_json = resp_json
@patch('bayesian.utility.db_gateway.post')
def test_get_data_from_graph(self, _mockpost):
"""Test Get data from Graph. Gremlin calls."""
_mockpost().json.return_value = self.resp_json
ga = GraphAnalyses.get_ca_data_from_graph('eco', 'pkg', 'ver', 'snyk')
self.assertIsInstance(ga, dict)
self.assertIn('result', ga)
self.assertIsInstance(ga.get('result'), dict)
self.assertIn('requestId', ga)
self.assertIsInstance(ga.get('requestId'), str)
self.assertIn('status', ga)
self.assertIsInstance(ga.get('status'), dict)
@patch('bayesian.utility.db_gateway.post')
def test_get_batch_ca_data(self, _mockpost):
"""Test get_batch_ca_data."""
_mockpost().json.return_value = self.gremlin_batch
ga = GraphAnalyses.get_batch_ca_data(
ecosystem='eco', packages=[{'name': 'django', 'version': '1.1'}])
self.assertIsInstance(ga, dict)
self.assertIn('result', ga)
self.assertIsInstance(ga.get('result'), dict)
self.assertIn('requestId', ga)
self.assertIsInstance(ga.get('requestId'), str)
self.assertIn('status', ga)
self.assertIsInstance(ga.get('status'), dict)
@patch('bayesian.utility.db_gateway.post', return_value=Exception)
def test_get_batch_ca_data_exception(self, _mockpost):
"""Test get_batch_ca_data_exception."""
self.assertRaises(Exception, GraphAnalyses.get_batch_ca_data,
'eco', packages=[{'name': 'django', 'version': '1.1'}],
query_key='ca_batch')
class TestRdbAnalyses(unittest.TestCase):
"""Test RDB Analyses."""
@patch('bayesian.utility.db_gateway.fetch_sa_request', return_value={})
def test_get_request_data_success(self, _fetch_sa_request):
"""Test get SA request data."""
rdbAnalyses = RdbAnalyses('dummy_request_id')
self.assertEqual(rdbAnalyses.get_request_data(), {})
@patch('bayesian.utility.db_gateway.fetch_sa_request', return_value=None)
def test_get_request_data_error(self, _fetch_sa_request):
"""Test get SA request data with return as 404 error."""
rdbAnalyses = RdbAnalyses('dummy_request_id')
with pytest.raises(Exception) as exception:
rdbAnalyses.get_request_data()
self.assertIs(exception.type, RDBInvalidRequestException)
@patch('bayesian.utility.db_gateway.retrieve_worker_result', return_value={})
def test_get_stack_result(self, _fetch_sa_request):
"""Test get SA stack result."""
rdbAnalyses = RdbAnalyses('dummy_request_id')
self.assertEqual(rdbAnalyses.get_stack_result(), {})
@patch('bayesian.utility.db_gateway.retrieve_worker_result', return_value={})
def test_get_recommendation_data(self, _fetch_sa_request):
"""Test get SA recommendation data."""
rdbAnalyses = RdbAnalyses('dummy_request_id')
self.assertEqual(rdbAnalyses.get_recommendation_data(), {})
@patch('bayesian.utility.db_gateway.rdb.session.execute',
side_effect=SQLAlchemyError('Mock exception'))
def test_save_post_request_error(self, _execute):
"""Test error save request that raises exception."""
rdbAnalyses = RdbAnalyses('dummy_request_id', '', {}, {})
with pytest.raises(Exception) as exception:
rdbAnalyses.save_post_request()
self.assertIs(exception.type, RDBSaveException)
@patch('bayesian.utility.db_gateway.rdb.session.execute', return_value=0)
@patch('bayesian.utility.db_gateway.rdb.session.commit', return_value=0)
def test_save_post_request_success(self, _commit, _execute):
"""Test success save request."""
rdbAnalyses = RdbAnalyses('dummy_request_id', '', {}, {})
self.assertEqual(rdbAnalyses.save_post_request(), None)
|
the-stack_0_20268 | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import torch
from fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.meters import StopwatchMeter, TimeMeter
import sys
import pdb
import numpy as np
from time import time
#-->List of SubTransformer configurations for selected HAT operating points.
model2000args = {'encoder': {'encoder_embed_dim': 512, 'encoder_layer_num': 6, 'encoder_ffn_embed_dim': [3072, 3072, 3072, 2048, 3072, 2048], 'encoder_self_attention_heads': [8, 8, 4, 4, 8, 4]}, 'decoder': {'decoder_embed_dim': 512, 'decoder_layer_num': 6, 'decoder_ffn_embed_dim': [3072, 3072, 3072, 3072, 3072, 3072], 'decoder_self_attention_heads': [8, 8, 4, 8, 4, 4], 'decoder_ende_attention_heads': [8, 8, 8, 4, 8, 8], 'decoder_arbitrary_ende_attn': [-1, 1, 1, 1, 1, -1]}}
model1250args = {'encoder': {'encoder_embed_dim': 512, 'encoder_layer_num': 6, 'encoder_ffn_embed_dim': [3072, 3072, 3072, 2048, 3072, 3072], 'encoder_self_attention_heads': [8, 8, 8, 4, 8, 4]}, 'decoder': {'decoder_embed_dim': 512, 'decoder_layer_num': 5, 'decoder_ffn_embed_dim': [3072, 3072, 3072, 3072, 3072], 'decoder_self_attention_heads': [4, 8, 8, 4, 4], 'decoder_ende_attention_heads': [8, 8, 8, 8, 8], 'decoder_arbitrary_ende_attn': [-1, 1, 1, 1, -1]}}
model1000args = {'encoder': {'encoder_embed_dim': 512, 'encoder_layer_num': 6, 'encoder_ffn_embed_dim': [3072, 3072, 3072, 2048, 3072, 3072], 'encoder_self_attention_heads': [8, 8, 8, 4, 8, 4]}, 'decoder': {'decoder_embed_dim': 512, 'decoder_layer_num': 4, 'decoder_ffn_embed_dim': [3072, 3072, 3072, 3072], 'decoder_self_attention_heads': [8, 8, 8, 4], 'decoder_ende_attention_heads': [8, 8, 8, 8], 'decoder_arbitrary_ende_attn': [1, 1, 1, -1]}}
model900args = {'encoder': {'encoder_embed_dim': 512, 'encoder_layer_num': 6, 'encoder_ffn_embed_dim': [3072, 3072, 3072, 2048, 3072, 3072], 'encoder_self_attention_heads': [8, 8, 4, 8, 8, 8]}, 'decoder': {'decoder_embed_dim': 512, 'decoder_layer_num': 3, 'decoder_ffn_embed_dim': [3072, 3072, 3072], 'decoder_self_attention_heads': [8, 8, 8], 'decoder_ende_attention_heads': [8, 8, 8], 'decoder_arbitrary_ende_attn': [1, 1, 1]}}
model700args = {'encoder': {'encoder_embed_dim': 512, 'encoder_layer_num': 6, 'encoder_ffn_embed_dim': [3072, 3072, 3072, 2048, 3072, 3072], 'encoder_self_attention_heads': [8, 8, 8, 8, 8, 4]}, 'decoder': {'decoder_embed_dim': 512, 'decoder_layer_num': 2, 'decoder_ffn_embed_dim': [3072, 3072], 'decoder_self_attention_heads': [8, 8], 'decoder_ende_attention_heads': [8, 8], 'decoder_arbitrary_ende_attn': [1, 1]}}
model350args = {'encoder': {'encoder_embed_dim': 512, 'encoder_layer_num': 6, 'encoder_ffn_embed_dim': [2048, 3072, 3072, 3072, 3072, 2048], 'encoder_self_attention_heads': [8, 8, 4, 8, 8, 8]}, 'decoder': {'decoder_embed_dim': 512, 'decoder_layer_num': 1, 'decoder_ffn_embed_dim': [3072], 'decoder_self_attention_heads': [8], 'decoder_ende_attention_heads': [8], 'decoder_arbitrary_ende_attn': [-1]}}
modelconfigs = {'350':model350args, '700':model700args, '900':model900args, '1000':model1000args, '1250':model1250args, '2000':model2000args}
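#-->Keys of modelconfigs are the supported latency constraints in milliseconds
#   (the same values offered in the interactive prompt inside main() below).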
modelargs = {}
open("d-HAT_output.txt", "w").close()  # truncate any previous output
def main(args):
loop_count = 0
    #-->Start timing for initial loading of SuperTransformer weights
build_start = time()
assert args.path is not None, '--path required for generation!'
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert args.replace_unk is None or args.raw_text, \
'--replace-unk requires a raw text dataset (--raw-text)'
utils.import_user_module(args)
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 12000
#print(args.lat_config)
use_cuda = torch.cuda.is_available() and not args.cpu
# when running on CPU, use fp32 as default
if not use_cuda:
args.fp16 = False
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
'''# Set dictionaries - tried in loop
try:
src_dict = getattr(task, 'source_dictionary', None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary'''
# Load ensemble
print('| loading model(s) from {}'.format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(':'),
arg_overrides=eval(args.model_overrides),
task=task,
)
torch.manual_seed(args.seed)
build_end = time()
while True:
#-->Enter required latency constraint.
print ('Please enter a latency constraint/operating point to sample \nfrom the SuperTransformer design space (350, 700, 900, 1000, 1250, 2000):', file = sys.stderr)
input_lat = input()
args.lat_config = input_lat
#-->Start timing for sampling a new SubTransformer configuration.
lat_start = time()
print ("\n\nLatency constraint:", args.lat_config)
print ("\n")
outFile = open("d-HAT_output.txt", "a")
outFile.write("\nLatency constraint: {}\n\n".format(args.lat_config))
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
#-->Print loop info to debug file.
with open("debug_task.txt", "a") as dFile2:
print ("Start of loop X", file=dFile2)
print ("\n\n\n", file=dFile2)
# Set dictionaries
try:
src_dict = getattr(task, 'source_dictionary', None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
if use_cuda:
model.cuda()
config = utils.get_subtransformer_config(args)
model.set_sample_config(modelconfigs[args.lat_config])
print(f"| Latency: {args.lat_config} ms", file = sys.stderr)
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
#print(model, file=sys.stderr)
#print(args.path, file=sys.stderr)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
lat_end = time()
#-->Start timing translation output for input sentences.
inference_start = time()
# Initialize generator
gen_timer = StopwatchMeter()
generator = task.build_generator(args)
num_sentences = 0
has_target = True
decoder_times_all = []
input_len_all = []
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if 'net_input' not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample['target'][:, :args.prefix_size]
#-->Print variable info to debug files.
with open("debug.txt", "w") as dFile:
print ("\n\n\n GLOBAL VARIABLES \n\n\n", file=dFile)
print (globals(), file = dFile)
print ("\n\n\n LOCAL VARIABLES \n\n\n", file=dFile)
print (locals(), file = dFile)
print ("\n\n\n", file=dFile)
with open("debug_task.txt", "a") as dFile2:
print ("Inference Step X", file=dFile2)
print (len(tgt_dict), file = dFile2)
print ("\n\n\n", file=dFile2)
gen_timer.start()
hypos, decoder_times = task.inference_step(generator, models, sample, prefix_tokens)
input_len_all.append(np.mean(sample['net_input']['src_lengths'].cpu().numpy()))
print(decoder_times)
decoder_times_all.append(decoder_times)
num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample['id'].tolist()):
has_target = sample['target'] is not None
# Remove padding
src_tokens = utils.strip_pad(sample['net_input']['src_tokens'][i, :], tgt_dict.pad())
target_tokens = None
if has_target:
target_tokens = utils.strip_pad(sample['target'][i, :], tgt_dict.pad()).int().cpu()
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)
target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, args.remove_bpe)
else:
src_str = ""
if has_target:
target_str = tgt_dict.string(target_tokens, args.remove_bpe, escape_unk=True)
if not args.quiet:
if src_dict is not None:
print('S-{}\t{}'.format(sample_id, src_str))
if has_target:
print('T-{}\t{}'.format(sample_id, target_str))
# Process top predictions
for j, hypo in enumerate(hypos[i][:args.nbest]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
)
if not args.quiet:
print('H-{}\t{}\t{}'.format(sample_id, hypo['score'], hypo_str))
print('P-{}\t{}'.format(
sample_id,
' '.join(map(
lambda x: '{:.4f}'.format(x),
hypo['positional_scores'].tolist(),
))
))
if args.print_alignment:
print('A-{}\t{}'.format(
sample_id,
' '.join(map(lambda x: str(utils.item(x)), alignment))
))
#-->Printing to d-HAT output file.
outFile.write("Input[{}] (English): {}\n".format(sample_id,src_str))
outFile.write("Output[{}] (German): {}\n".format(sample_id,hypo_str))
outFile.write("Reference[{}] : {}\n".format(sample_id,target_str))
outFile.write("--------------------------------------------------\n\n")
wps_meter.update(num_generated_tokens)
t.log({'wps': round(wps_meter.avg)})
num_sentences += sample['nsentences']
inference_end = time()
#-->Printing latency information for the HAT model.
if loop_count == 0:
print(f"\n| **Time to load SuperTransformer weights: {build_end - build_start}**\n")
print(f"\n| **Time to load SuperTransformer weights: {build_end - build_start}**", file=sys.stderr)
outFile.write("| **Time to load SuperTransformer weights: {}**\n".format(build_end-build_start))
print(f"\n| **Time to sample SubTransformer configuration: {lat_end - lat_start}**\n")
print(f"\n| **Time to generate translations: {inference_end - inference_start}**\n")
print(f"| **Time to sample SubTransformer configuration: {lat_end - lat_start}**", file=sys.stderr)
print(f"| **Time to generate translations: {inference_end - inference_start}**\n", file=sys.stderr)
outFile.write("| **Time to sample SubTransformer configuration: {}**\n".format(lat_end-lat_start))
outFile.write("| **Time to generate translations: {}**\n".format(inference_end - inference_start))
outFile.write("--------------------------------------------------\n\n")
outFile.close()
print ("| **Translation complete. See file: d-HAT/d-HAT_output.txt**\n", file=sys.stderr)
loop_count += 1
def cli_main():
parser = options.get_generation_parser()
parser.add_argument('--encoder-embed-dim-subtransformer', type=int, help='subtransformer encoder embedding dimension',
default=None)
parser.add_argument('--decoder-embed-dim-subtransformer', type=int, help='subtransformer decoder embedding dimension',
default=None)
parser.add_argument('--encoder-ffn-embed-dim-all-subtransformer', nargs='+', default=None, type=int)
parser.add_argument('--decoder-ffn-embed-dim-all-subtransformer', nargs='+', default=None, type=int)
parser.add_argument('--encoder-layer-num-subtransformer', type=int, help='subtransformer num encoder layers')
parser.add_argument('--decoder-layer-num-subtransformer', type=int, help='subtransformer num decoder layers')
parser.add_argument('--encoder-self-attention-heads-all-subtransformer', nargs='+', default=None, type=int)
parser.add_argument('--decoder-self-attention-heads-all-subtransformer', nargs='+', default=None, type=int)
parser.add_argument('--decoder-ende-attention-heads-all-subtransformer', nargs='+', default=None, type=int)
parser.add_argument('--decoder-arbitrary-ende-attn-all-subtransformer', nargs='+', default=None, type=int)
#-->Set default latency config
parser.add_argument('--lat-config', default = '1000', help = 'default config to use from model param dictionary')
args = options.parse_args_and_arch(parser)
if args.pdb:
pdb.set_trace()
main(args)
if __name__ == '__main__':
cli_main()
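#-->Illustrative invocation (paths and dataset names are placeholders; the
#   actual flags come from fairseq's generation parser plus the extra
#   SubTransformer/--lat-config arguments registered in cli_main above):
#
#   python generate_dhat.py data-bin/wmt14_en_de \
#       --path checkpoints/supertransformer.pt \
#       --beam 5 --remove-bpe --lat-config 1000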
|
the-stack_0_20269 | import tkinter as tk
from tkinter import messagebox
window = tk.Tk()
window.title("Geometric Progression")
window.geometry('400x500+300+300')
window.resizable(0,0)
#The Frames
arow = tk.Frame(window)
arow.pack(expand = True, fill = "x")
crrow = tk.Frame(window)
crrow.pack(expand = True, fill = "x")
nrow = tk.Frame(window)
nrow.pack(expand = True, fill = "x")
resultrow = tk.Frame(window)
resultrow.pack(expand = True, fill = "x")
btnrow = tk.Frame(window)
btnrow.pack(expand = True, fill = "both")
#The labels
lbla = tk.Label(
arow,
text = "Enter First Term",
font = ("Calibri",16),
)
lbla.grid(row = 0, column = 1)
lblcr = tk.Label(
crrow,
text = "Enter Common Ratio",
font = ("Calibri", 16),
)
lblcr.grid(row = 0, column = 1)
lbln = tk.Label(
nrow,
text = "Enter the number of terms",
font = ("Calibri", 16)
)
lbln.grid(row = 0, column = 1 )
lblr = tk.Label(
resultrow,
text = "The required term",
font = ("Calibri", 16)
)
lblr.grid(row = 0, column = 1)
#Text box designs
a = tk.StringVar()
cr = tk.StringVar()
n = tk.StringVar()
output = tk.StringVar()
#The first term
txta = tk.Entry(
arow,
width = 10,
font = ("Calibri",16),
textvariable = a
)
txta.place(x = 280, y = 0)
#The common ratio
txtcr = tk.Entry(
crrow,
width = 10,
font = ("Calibri", 16),
textvariable = cr
)
txtcr.place(x = 280, y = 0)
#The number of terms
txtn = tk.Entry(
nrow,
width = 10,
font = ("Calibri", 16),
textvariable = n
)
txtn.place(x = 280, y = 0)
#Button callbacks: result() validates the inputs and computes the term, clear() resets the fields
def result():
a = (txta.get())
r = (txtcr.get())
n = (txtn.get())
if (a.isdigit() and r.isdigit() and n.isdigit()):
a = int(a)
r = int(r)
n = int(n)
C = a * r ** (n - 1)
output.set(C)
return True
else:
tk.messagebox.showwarning("Wrong Data!", "Please enter integer only to proceed.")
return False
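#Worked example: first term a = 2, common ratio r = 3, n = 4 gives
#C = 2 * 3 ** (4 - 1) = 54, i.e. the 4th term of the sequence 2, 6, 18, 54.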
def clear():
a.set("")
cr.set("")
n.set("")
output.set("")
#The label to show the output
lblr2 = tk.Label(
resultrow,
width = 10,
textvariable = output,
bg = "#ffffff",
fg = "#000000",
font = ("Calibri", 16,)
)
lblr2.place(x = 280, y = 0 )
#The buttons
btn_findterm = tk.Button(
btnrow,
text = "Find the Term",
font = ("Calibri",16),
command = result
)
btn_findterm.place(x = 10, y = 10)
btn_clear = tk.Button(
btnrow,
text = "Clear",
font = ("Calibri", 16),
command = clear
)
btn_clear.place(x = 275, y = 10)
window.mainloop()
|
the-stack_0_20270 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.legacy.plugin_connector import LightningCustomPlugins, PluginConnector
def test_available_plugins_trainer():
""" Test that available plugins return the correct list in the trainer. """
plugins = Trainer.available_plugins()
expected_plugins = [e.name for e in LightningCustomPlugins]
assert plugins == expected_plugins
def test_available_plugins_connector():
""" Test that available plugins return the correct list in the connector. """
plugins = PluginConnector.available_plugins()
expected_plugins = [e.name for e in LightningCustomPlugins]
assert plugins == expected_plugins
|
the-stack_0_20272 | import discord
from discord.ext import *
from discord.ext.commands import Bot
import asyncio
import time
import random
from discord.ext import commands, tasks
bot = commands.Bot
#I do not know how to do Object Oriented Programming so this is my best shot at a class full of commands
class commands(commands.Cog):
def __init__(self, bot):
self.bot = bot
#Commands
#Purge command to delete messages
@commands.command()
@commands.has_permissions(manage_messages=True)
async def purge(self, ctx, amount=1):
if amount>10:
await ctx.send('Too much to purge.', delete_after=3)
if amount<=10:
await ctx.channel.purge(limit=amount+1)
await ctx.send(f'Purged {amount} messages.', delete_after=3)
    #Echoes what the issuing person said
@commands.command()
@commands.has_permissions(manage_messages=True)
async def say(self, ctx, *, message=" "):
await ctx.send(message)
@commands.command()
async def bhb(self, ctx):
await ctx.send('Yes, I am on.')
@commands.command()
async def help(self, ctx):
author = ctx.message.author
embed = discord.Embed(color = discord.Colour.blurple())
embed.set_author(name="Here's the list for Bonk's commands!")
embed.add_field(name = 'Normal Commands', value = '-----------------------', inline = False)
embed.add_field(name = '.purge', value = 'Purges up to 10 messages or less if specified.\n Use as `.purge (amount of messages up to 10)`.', inline = False)
embed.add_field(name = '.say', value = 'Repeats whatever you say.\n Beware, this can repeat anything and is locked to only those with the manage messages perm.\n Use as `.say (whatever message you want to repeat)`.', inline = False)
embed.add_field(name = '.bhb', value = 'A command used to see if the bot is online.\n Use as `.bhb`.', inline = False)
embed.add_field(name = '.help', value = 'This command! You just used it.\n Use as `.help`.', inline = False)
embed.add_field(name = 'Reddit Commands', value = '----------------------', inline = False)
embed.add_field(name = '.meme (also used as .m, .M, .MEME', value = 'Posts a random meme from Reddit!\n Use as `.meme`.', inline = False)
embed.add_field(name = '.porn (also used as .p, .P, .PORN, .sex, .horny)', value = 'Posts a random porn pic from Reddit!\n Use as `.porn`.', inline = False)
await author.send(embed=embed)
await ctx.send('Check your DMs!')
def setup(bot):
bot.add_cog(commands(bot))
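#Illustrative wiring (module path is hypothetical): with a discord.py 1.x bot,
#this cog would typically be loaded via bot.load_extension("cogs.commands"),
#which in turn calls the setup() function above.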
|
the-stack_0_20273 | #!/usr/bin/env python3
# Copyright (c) 2019 The EROS developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# -*- coding: utf-8 -*-
from time import sleep
from test_framework.mininode import network_thread_start
from test_framework.test_framework import ErosTestFramwork
from test_framework.util import connect_nodes_bi, p2p_port
from fake_stake.util import TestNode
class EROS_RPCSporkTest(ErosTestFramwork):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-staking=1']] * self.num_nodes
self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi')
def setup_network(self):
''' Can't rely on syncing all the nodes when staking=1
:param:
:return:
'''
self.setup_nodes()
for i in range(self.num_nodes - 1):
for j in range(i+1, self.num_nodes):
connect_nodes_bi(self.nodes, i, j)
def init_test(self):
''' Initializes test parameters
:param:
:return:
'''
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
self.log.info("\n\n%s\n%s\n%s\n", title, underline, self.description)
# Setup the p2p connections and start up the network thread.
self.test_nodes = []
for i in range(self.num_nodes):
self.test_nodes.append(TestNode())
self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))
network_thread_start() # Start up network handling in another thread
self.node = self.nodes[0]
# Let the test nodes get in sync
for i in range(self.num_nodes):
self.test_nodes[i].wait_for_verack()
def printDict(self, d):
self.log.info("{")
for k in d:
self.log.info(" %s = %d" % (k, d[k]))
self.log.info("}")
def run_test(self):
self.description = "Performs tests on the Spork RPC"
# check spork values:
sporks = self.nodes[1].spork("show")
self.printDict(sporks)
active = self.nodes[1].spork("active")
assert(not active["SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT"])
# activate SPORK 8
new_value = 1563253447
res = self.nodes[0].spork("SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT", new_value)
assert(res == "success")
sleep(1)
self.sync_all()
sporks = self.nodes[1].spork("show")
self.printDict(sporks)
assert(sporks["SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT"] == new_value)
active = self.nodes[0].spork("active")
assert (active["SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT"])
self.log.info("Stopping nodes...")
self.stop_nodes()
self.log.info("Restarting node 1...")
self.start_node(1, [])
sporks = self.nodes[1].spork("show")
self.printDict(sporks)
assert (sporks["SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT"] == new_value)
if __name__ == '__main__':
EROS_RPCSporkTest().main()
|
the-stack_0_20274 | # inspired by the NmtMaster code
from canopen import Node
# status word 0x6041 bitmask and values in the list in the dictionary value
POWER_STATES_402 = {
'NOT READY TO SWITCH ON': [0x4F, 0x00],
'SWITCH ON DISABLED' : [0x4F, 0x40],
'READY TO SWITCH ON' : [0x6F, 0x21],
'SWITCHED ON' : [0x6F, 0x23],
'OPERATION ENABLED' : [0x6F, 0x27],
'FAULT' : [0x4F, 0x08],
'FAULT REACTION ACTIVE' : [0x4F, 0x0F],
'QUICK STOP ACTIVE' : [0x6F, 0x07]
}
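# Example: a statusword of 0x0237 masked with 0x6F gives 0x27, which matches
# the 'OPERATION ENABLED' entry above; 0x0250 & 0x4F == 0x40 would match
# 'SWITCH ON DISABLED'.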
# control word 0x6040
POWER_STATE_COMMANDS = {
'SWITCH ON DISABLED' : 0x80,
'DISABLE VOLTAGE' : 0x04,
'READY TO SWITCH ON' : 0x06,
'SWITCHED ON' : 0x07,
'OPERATION ENABLED' : 0x0F,
'QUICK STOP ACTIVE' : 0x02
}
COMMAND_TO_POWER_STATE = {
0x80: 'SWITCH ON DISABLED',
0x06: 'READY TO SWITCH ON',
0x07: 'SWITCHED ON',
0x0F: 'OPERATION ENABLED',
0x02: 'QUICK STOP ACTIVE'
}
class Node402(Node):
"""A CANopen CiA 402 profile slave node.
:param int node_id:
Node ID (set to None or 0 if specified by object dictionary)
:param object_dictionary:
Object dictionary as either a path to a file, an ``ObjectDictionary``
or a file like object.
:type object_dictionary: :class:`str`, :class:`canopen.ObjectDictionary`
"""
def __init__(self, node_id, object_dictionary):
super(Node402, self).__init__(node_id, object_dictionary)
self.powerstate_402 = PowerStateMachine(self)
self.powerstate_402.network = self.network
def setup_402_state_machine(self):
# setup TPDO1 for this node
# TPDO1 will transmit the statusword of the 402 control state machine
# first read the current PDO setup and only change TPDO1
print(self.nmt.state)
self.nmt.state = 'PRE-OPERATIONAL'
self.pdo.tx[1].read()
self.pdo.tx[1].clear()
        # Use the register number (0x6041) rather than the EDS ParameterName to stay manufacturer agnostic
self.pdo.tx[1].add_variable(0x6041)
# add callback to listen to TPDO1 and change 402 state
self.pdo.tx[1].add_callback(self.powerstate_402.on_PDO1_callback)
self.pdo.tx[1].trans_type = 255
self.pdo.tx[1].enabled = True
self.pdo.tx[1].save()
self.nmt.state = 'OPERATIONAL'
class PowerStateMachine(object):
"""A CANopen CiA 402 Power State machine. Listens to state changes
of the DS402 Power State machine by means of TPDO 1 Statusword.
- Controlword 0x6040 causes transitions
- Statusword 0x6041 gives the current state
"""
def __init__(self, node):
self.id = node.id
self.node = node
self._state = 'NOT READY TO SWITCH ON'
@staticmethod
def on_PDO1_callback(mapobject):
# this function receives a map object.
# this map object is then used for changing the
# Node402.PowerstateMachine._state by reading the statusword
# The TPDO1 is defined in setup_402_state_machine
statusword = mapobject[0].raw
for key, value in POWER_STATES_402.items():
# check if the value after applying the bitmask (value[0])
# corresponds with the value[1] to determine the current status
bitmaskvalue = statusword & value[0]
if bitmaskvalue == value[1]:
mapobject.pdo_node.node.powerstate_402._state = key
@property
def state(self):
"""Attribute to get or set node's state as a string.
States of the node can be one of:
- 'NOT READY TO SWITCH ON'
- 'SWITCH ON DISABLED'
- 'READY TO SWITCH ON'
- 'SWITCHED ON'
- 'OPERATION ENABLED'
- 'FAULT'
- 'FAULT REACTION ACTIVE'
- 'QUICK STOP ACTIVE'
States to switch to can be one of:
- 'SWITCH ON DISABLED'
- 'DISABLE VOLTAGE'
- 'READY TO SWITCH ON'
- 'SWITCHED ON'
- 'OPERATION ENABLED'
- 'QUICK STOP ACTIVE'
"""
        # self._state already holds the state name as a plain string
        return self._state
@state.setter
def state(self, new_state):
if new_state in POWER_STATE_COMMANDS:
code = POWER_STATE_COMMANDS[new_state]
else:
raise ValueError("'%s' is an invalid state. Must be one of %s." %
(new_state, ", ".join(POWER_STATE_COMMANDS)))
# send the control word in a manufacturer agnostic way
# by not using the EDS ParameterName but the register number
self.node.sdo[0x6040].raw = code
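# --- Illustrative usage sketch (not part of the original module) ---
# The CAN interface name, node id and EDS path below are placeholders.
def _example_usage():
    import canopen
    network = canopen.Network()
    network.connect(channel='can0', bustype='socketcan')
    node = Node402(35, '/path/to/drive.eds')
    network.add_node(node)
    node.setup_402_state_machine()
    # request a power state transition via controlword 0x6040
    node.powerstate_402.state = 'OPERATION ENABLED'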
|
the-stack_0_20276 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
from __future__ import division
import collections
import copy
import itertools
import re
import warnings
from .card import Card, _pad, KEYWORD_LENGTH
from .file import _File
from .util import encode_ascii, decode_ascii, fileobj_closed, fileobj_is_binary
from ...extern import six
from ...extern.six import string_types, itervalues, iteritems, next
from ...extern.six.moves import zip, range, zip_longest
from ...utils import isiterable
from ...utils.exceptions import AstropyUserWarning
from ...utils.decorators import deprecated_renamed_argument
BLOCK_SIZE = 2880 # the FITS block size
# This regular expression can match a *valid* END card which just consists of
# the string 'END' followed by all spaces, or an *invalid* end card which
# consists of END, followed by any character that is *not* a valid character
# for a valid FITS keyword (that is, this is not a keyword like 'ENDER' which
# starts with 'END' but is not 'END'), followed by any arbitrary bytes. An
# invalid end card may also consist of just 'END' with no trailing bytes.
HEADER_END_RE = re.compile(encode_ascii(
r'(?:(?P<valid>END {77}) *)|(?P<invalid>END$|END {0,76}[^A-Z0-9_-])'))
# According to the FITS standard the only characters that may appear in a
# header record are the restricted ASCII chars from 0x20 through 0x7E.
VALID_HEADER_CHARS = set(chr(x) for x in range(0x20, 0x7F))
END_CARD = 'END' + ' ' * 77
__doctest_skip__ = ['Header', 'Header.*']
class Header(object):
"""
FITS header class. This class exposes both a dict-like interface and a
list-like interface to FITS headers.
The header may be indexed by keyword and, like a dict, the associated value
will be returned. When the header contains cards with duplicate keywords,
only the value of the first card with the given keyword will be returned.
It is also possible to use a 2-tuple as the index in the form (keyword,
n)--this returns the n-th value with that keyword, in the case where there
are duplicate keywords.
For example::
>>> header['NAXIS']
0
>>> header[('FOO', 1)] # Return the value of the second FOO keyword
'foo'
The header may also be indexed by card number::
>>> header[0] # Return the value of the first card in the header
'T'
Commentary keywords such as HISTORY and COMMENT are special cases: When
indexing the Header object with either 'HISTORY' or 'COMMENT' a list of all
the HISTORY/COMMENT values is returned::
>>> header['HISTORY']
This is the first history entry in this header.
This is the second history entry in this header.
...
See the Astropy documentation for more details on working with headers.
"""
def __init__(self, cards=[], copy=False):
"""
Construct a `Header` from an iterable and/or text file.
Parameters
----------
cards : A list of `Card` objects, optional
The cards to initialize the header with. Also allowed are other
`Header` (or `dict`-like) objects.
.. versionchanged:: 1.2
Allowed ``cards`` to be a `dict`-like object.
copy : bool, optional
If ``True`` copies the ``cards`` if they were another `Header`
instance.
Default is ``False``.
.. versionadded:: 1.3
"""
self.clear()
if isinstance(cards, Header):
if copy:
cards = cards.copy()
cards = cards.cards
elif isinstance(cards, dict):
cards = six.iteritems(cards)
for card in cards:
self.append(card, end=True)
self._modified = False
def __len__(self):
return len(self._cards)
def __iter__(self):
for card in self._cards:
yield card.keyword
def __contains__(self, keyword):
if keyword in self._keyword_indices or keyword in self._rvkc_indices:
# For the most common case (single, standard form keyword lookup)
# this will work and is an O(1) check. If it fails that doesn't
# guarantee absence, just that we have to perform the full set of
# checks in self._cardindex
return True
try:
self._cardindex(keyword)
except (KeyError, IndexError):
return False
return True
def __getitem__(self, key):
if isinstance(key, slice):
return Header([copy.copy(c) for c in self._cards[key]])
elif self._haswildcard(key):
return Header([copy.copy(self._cards[idx])
for idx in self._wildcardmatch(key)])
elif (isinstance(key, string_types) and
key.upper() in Card._commentary_keywords):
key = key.upper()
# Special case for commentary cards
return _HeaderCommentaryCards(self, key)
if isinstance(key, tuple):
keyword = key[0]
else:
keyword = key
card = self._cards[self._cardindex(key)]
if card.field_specifier is not None and keyword == card.rawkeyword:
# This is RVKC; if only the top-level keyword was specified return
# the raw value, not the parsed out float value
return card.rawvalue
return card.value
def __setitem__(self, key, value):
if self._set_slice(key, value, self):
return
if isinstance(value, tuple):
if not (0 < len(value) <= 2):
raise ValueError(
'A Header item may be set with either a scalar value, '
'a 1-tuple containing a scalar value, or a 2-tuple '
'containing a scalar value and comment string.')
if len(value) == 1:
value, comment = value[0], None
if value is None:
value = ''
elif len(value) == 2:
value, comment = value
if value is None:
value = ''
if comment is None:
comment = ''
else:
comment = None
card = None
if isinstance(key, int):
card = self._cards[key]
elif isinstance(key, tuple):
card = self._cards[self._cardindex(key)]
if card:
card.value = value
if comment is not None:
card.comment = comment
if card._modified:
self._modified = True
else:
# If we get an IndexError that should be raised; we don't allow
# assignment to non-existing indices
self._update((key, value, comment))
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# This is very inefficient but it's not a commonly used feature.
# If someone out there complains that they make heavy use of slice
# deletions and it's too slow, well, we can worry about it then
            # [the solution is not too complicated--it would be to wait until all
# the cards are deleted before updating _keyword_indices rather
# than updating it once for each card that gets deleted]
if isinstance(key, slice):
indices = range(*key.indices(len(self)))
# If the slice step is backwards we want to reverse it, because
# it will be reversed in a few lines...
if key.step and key.step < 0:
indices = reversed(indices)
else:
indices = self._wildcardmatch(key)
for idx in reversed(indices):
del self[idx]
return
elif isinstance(key, string_types):
# delete ALL cards with the same keyword name
key = Card.normalize_keyword(key)
indices = self._keyword_indices
if key not in self._keyword_indices:
indices = self._rvkc_indices
if key not in indices:
# if keyword is not present raise KeyError.
# To delete keyword without caring if they were present,
# Header.remove(Keyword) can be used with optional argument ignore_missing as True
raise KeyError("Keyword '{}' not found.".format(key))
for idx in reversed(indices[key]):
# Have to copy the indices list since it will be modified below
del self[idx]
return
idx = self._cardindex(key)
card = self._cards[idx]
keyword = card.keyword
del self._cards[idx]
keyword = Card.normalize_keyword(keyword)
indices = self._keyword_indices[keyword]
indices.remove(idx)
if not indices:
del self._keyword_indices[keyword]
# Also update RVKC indices if necessary :/
if card.field_specifier is not None:
indices = self._rvkc_indices[card.rawkeyword]
indices.remove(idx)
if not indices:
del self._rvkc_indices[card.rawkeyword]
# We also need to update all other indices
self._updateindices(idx, increment=False)
self._modified = True
def __repr__(self):
return self.tostring(sep='\n', endcard=False, padding=False)
def __str__(self):
return self.tostring()
def __eq__(self, other):
"""
Two Headers are equal only if they have the exact same string
representation.
"""
return str(self) == str(other)
def __add__(self, other):
temp = self.copy(strip=False)
temp.extend(other)
return temp
def __iadd__(self, other):
self.extend(other)
return self
@property
def cards(self):
"""
The underlying physical cards that make up this Header; it can be
looked at, but it should not be modified directly.
"""
return _CardAccessor(self)
@property
def comments(self):
"""
View the comments associated with each keyword, if any.
For example, to see the comment on the NAXIS keyword:
>>> header.comments['NAXIS']
number of data axes
Comments can also be updated through this interface:
>>> header.comments['NAXIS'] = 'Number of data axes'
"""
return _HeaderComments(self)
@property
def _modified(self):
"""
Whether or not the header has been modified; this is a property so that
it can also check each card for modifications--cards may have been
modified directly without the header containing it otherwise knowing.
"""
modified_cards = any(c._modified for c in self._cards)
if modified_cards:
# If any cards were modified then by definition the header was
# modified
self.__dict__['_modified'] = True
return self.__dict__['_modified']
@_modified.setter
def _modified(self, val):
self.__dict__['_modified'] = val
@classmethod
def fromstring(cls, data, sep=''):
"""
Creates an HDU header from a byte string containing the entire header
data.
Parameters
----------
data : str
String containing the entire header.
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file).
Returns
-------
header
A new `Header` instance.
"""
cards = []
# If the card separator contains characters that may validly appear in
# a card, the only way to unambiguously distinguish between cards is to
# require that they be Card.length long. However, if the separator
# contains non-valid characters (namely \n) the cards may be split
# immediately at the separator
require_full_cardlength = set(sep).issubset(VALID_HEADER_CHARS)
# Split the header into individual cards
idx = 0
image = []
while idx < len(data):
if require_full_cardlength:
end_idx = idx + Card.length
else:
try:
end_idx = data.index(sep, idx)
except ValueError:
end_idx = len(data)
next_image = data[idx:end_idx]
idx = end_idx + len(sep)
if image:
if next_image[:8] == 'CONTINUE':
image.append(next_image)
continue
cards.append(Card.fromstring(''.join(image)))
if require_full_cardlength:
if next_image == END_CARD:
image = []
break
else:
if next_image.split(sep)[0].rstrip() == 'END':
image = []
break
image = [next_image]
# Add the last image that was found before the end, if any
if image:
cards.append(Card.fromstring(''.join(image)))
return cls(cards)
@classmethod
def fromfile(cls, fileobj, sep='', endcard=True, padding=True):
"""
Similar to :meth:`Header.fromstring`, but reads the header string from
a given file-like object or filename.
Parameters
----------
fileobj : str, file-like
A filename or an open file-like object from which a FITS header is
to be read. For open file handles the file pointer must be at the
beginning of the header.
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file).
endcard : bool, optional
If True (the default) the header must end with an END card in order
to be considered valid. If an END card is not found an
`IOError` is raised.
padding : bool, optional
If True (the default) the header will be required to be padded out
to a multiple of 2880, the FITS header block size. Otherwise any
padding, or lack thereof, is ignored.
Returns
-------
header
A new `Header` instance.
"""
close_file = False
if isinstance(fileobj, string_types):
# Open in text mode by default to support newline handling; if a
# binary-mode file object is passed in, the user is on their own
# with respect to newline handling
fileobj = open(fileobj, 'r')
close_file = True
try:
is_binary = fileobj_is_binary(fileobj)
def block_iter(nbytes):
while True:
data = fileobj.read(nbytes)
if data:
yield data
else:
break
return cls._from_blocks(block_iter, is_binary, sep, endcard,
padding)[1]
finally:
if close_file:
fileobj.close()
@classmethod
def _from_blocks(cls, block_iter, is_binary, sep, endcard, padding):
"""
The meat of `Header.fromfile`; in a separate method so that
`Header.fromfile` itself is just responsible for wrapping file
handling. Also used by `_BaseHDU.fromstring`.
``block_iter`` should be a callable which, given a block size n
(typically 2880 bytes as used by the FITS standard) returns an iterator
of byte strings of that block size.
``is_binary`` specifies whether the returned blocks are bytes or text
Returns both the entire header *string*, and the `Header` object
returned by Header.fromstring on that string.
"""
actual_block_size = _block_size(sep)
clen = Card.length + len(sep)
blocks = block_iter(actual_block_size)
# Read the first header block.
try:
block = next(blocks)
except StopIteration:
raise EOFError()
if not is_binary:
# TODO: There needs to be error handling at *this* level for
# non-ASCII characters; maybe at this stage decoding latin-1 might
# be safer
block = encode_ascii(block)
read_blocks = []
is_eof = False
end_found = False
# continue reading header blocks until END card or EOF is reached
while True:
# find the END card
end_found, block = cls._find_end_card(block, clen)
read_blocks.append(decode_ascii(block))
if end_found:
break
try:
block = next(blocks)
except StopIteration:
is_eof = True
break
if not block:
is_eof = True
break
if not is_binary:
block = encode_ascii(block)
if not end_found and is_eof and endcard:
# TODO: Pass this error to validation framework as an ERROR,
# rather than raising an exception
raise IOError('Header missing END card.')
header_str = ''.join(read_blocks)
# Strip any zero-padding (see ticket #106)
if header_str and header_str[-1] == '\0':
if is_eof and header_str.strip('\0') == '':
# TODO: Pass this warning to validation framework
warnings.warn(
'Unexpected extra padding at the end of the file. This '
'padding may not be preserved when saving changes.',
AstropyUserWarning)
raise EOFError()
else:
# Replace the illegal null bytes with spaces as required by
# the FITS standard, and issue a nasty warning
# TODO: Pass this warning to validation framework
warnings.warn(
'Header block contains null bytes instead of spaces for '
'padding, and is not FITS-compliant. Nulls may be '
'replaced with spaces upon writing.', AstropyUserWarning)
                header_str = header_str.replace('\0', ' ')
if padding and (len(header_str) % actual_block_size) != 0:
# This error message ignores the length of the separator for
# now, but maybe it shouldn't?
actual_len = len(header_str) - actual_block_size + BLOCK_SIZE
# TODO: Pass this error to validation framework
raise ValueError(
'Header size is not multiple of {0}: {1}'.format(BLOCK_SIZE,
actual_len))
return header_str, cls.fromstring(header_str, sep=sep)
@classmethod
def _find_end_card(cls, block, card_len):
"""
Utility method to search a header block for the END card and handle
invalid END cards.
This method can also returned a modified copy of the input header block
in case an invalid end card needs to be sanitized.
"""
for mo in HEADER_END_RE.finditer(block):
# Ensure the END card was found, and it started on the
# boundary of a new card (see ticket #142)
if mo.start() % card_len != 0:
continue
# This must be the last header block, otherwise the
# file is malformatted
if mo.group('invalid'):
offset = mo.start()
trailing = block[offset + 3:offset + card_len - 3].rstrip()
if trailing:
trailing = repr(trailing).lstrip('ub')
# TODO: Pass this warning up to the validation framework
warnings.warn(
'Unexpected bytes trailing END keyword: {0}; these '
'bytes will be replaced with spaces on write.'.format(
trailing), AstropyUserWarning)
else:
# TODO: Pass this warning up to the validation framework
warnings.warn(
'Missing padding to end of the FITS block after the '
'END keyword; additional spaces will be appended to '
'the file upon writing to pad out to {0} '
'bytes.'.format(BLOCK_SIZE), AstropyUserWarning)
# Sanitize out invalid END card now that the appropriate
# warnings have been issued
block = (block[:offset] + encode_ascii(END_CARD) +
block[offset + len(END_CARD):])
return True, block
return False, block
def tostring(self, sep='', endcard=True, padding=True):
r"""
Returns a string representation of the header.
By default this uses no separator between cards, adds the END card, and
pads the string with spaces to the next multiple of 2880 bytes. That
is, it returns the header exactly as it would appear in a FITS file.
Parameters
----------
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If True (default) adds the END card to the end of the header
string
padding : bool, optional
If True (default) pads the string with spaces out to the next
multiple of 2880 characters
Returns
-------
s : str
A string representing a FITS header.
"""
lines = []
for card in self._cards:
s = str(card)
# Cards with CONTINUE cards may be longer than 80 chars; so break
# them into multiple lines
while s:
lines.append(s[:Card.length])
s = s[Card.length:]
s = sep.join(lines)
if endcard:
s += sep + _pad('END')
if padding:
s += ' ' * _pad_length(len(s))
return s
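    # Illustrative usage sketch (not part of the original source; the card
    # values below are made up):
    #
    #     hdr = Header([('SIMPLE', True), ('BITPIX', 8)])
    #     text = hdr.tostring(sep='\n', endcard=True, padding=False)
    #     # 'text' now holds one 80-character card per line, ending with END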
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def tofile(self, fileobj, sep='', endcard=True, padding=True,
overwrite=False):
r"""
Writes the header to file or file-like object.
By default this writes the header exactly as it would be written to a
FITS file, with the END card included and padding to the next multiple
of 2880 bytes. However, aspects of this may be controlled.
Parameters
----------
fileobj : str, file, optional
Either the pathname of a file, or an open file handle or file-like
object
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If `True` (default) adds the END card to the end of the header
string
padding : bool, optional
If `True` (default) pads the string with spaces out to the next
multiple of 2880 characters
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` (``IOError`` for Python 2) if ``False`` and the
output file exists. Default is ``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
"""
close_file = fileobj_closed(fileobj)
if not isinstance(fileobj, _File):
fileobj = _File(fileobj, mode='ostream', overwrite=overwrite)
try:
blocks = self.tostring(sep=sep, endcard=endcard, padding=padding)
actual_block_size = _block_size(sep)
if padding and len(blocks) % actual_block_size != 0:
raise IOError(
'Header size ({}) is not a multiple of block '
'size ({}).'.format(
len(blocks) - actual_block_size + BLOCK_SIZE,
BLOCK_SIZE))
if not fileobj.simulateonly:
fileobj.flush()
try:
offset = fileobj.tell()
except (AttributeError, IOError):
offset = 0
fileobj.write(blocks.encode('ascii'))
fileobj.flush()
finally:
if close_file:
fileobj.close()
@classmethod
def fromtextfile(cls, fileobj, endcard=False):
"""
Read a header from a simple text file or file-like object.
Equivalent to::
>>> Header.fromfile(fileobj, sep='\\n', endcard=False,
... padding=False)
See Also
--------
fromfile
"""
return cls.fromfile(fileobj, sep='\n', endcard=endcard, padding=False)
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def totextfile(self, fileobj, endcard=False, overwrite=False):
"""
Write the header as text to a file or a file-like object.
Equivalent to::
>>> Header.tofile(fileobj, sep='\\n', endcard=False,
... padding=False, overwrite=overwrite)
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
See Also
--------
tofile
"""
self.tofile(fileobj, sep='\n', endcard=endcard, padding=False,
overwrite=overwrite)
def clear(self):
"""
Remove all cards from the header.
"""
self._cards = []
self._keyword_indices = collections.defaultdict(list)
self._rvkc_indices = collections.defaultdict(list)
def copy(self, strip=False):
"""
Make a copy of the :class:`Header`.
.. versionchanged:: 1.3
`copy.copy` and `copy.deepcopy` on a `Header` will call this
method.
Parameters
----------
strip : bool, optional
If `True`, strip any headers that are specific to one of the
standard HDU types, so that this header can be used in a different
HDU.
Returns
-------
header
A new :class:`Header` instance.
"""
tmp = Header((copy.copy(card) for card in self._cards))
if strip:
tmp._strip()
return tmp
def __copy__(self):
return self.copy()
def __deepcopy__(self, *args, **kwargs):
return self.copy()
@classmethod
def fromkeys(cls, iterable, value=None):
"""
Similar to :meth:`dict.fromkeys`--creates a new `Header` from an
iterable of keywords and an optional default value.
This method is not likely to be particularly useful for creating real
world FITS headers, but it is useful for testing.
Parameters
----------
iterable
Any iterable that returns strings representing FITS keywords.
value : optional
A default value to assign to each keyword; must be a valid type for
FITS keywords.
Returns
-------
header
A new `Header` instance.
"""
d = cls()
if not isinstance(value, tuple):
value = (value,)
for key in iterable:
d.append((key,) + value)
return d
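    # Illustrative usage sketch (not part of the original source; the keyword
    # names below are made up):
    #
    #     hdr = Header.fromkeys(['A', 'B', 'C'], value=0)
    #     # hdr['A'] == hdr['B'] == hdr['C'] == 0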
def get(self, key, default=None):
"""
Similar to :meth:`dict.get`--returns the value associated with keyword
in the header, or a default value if the keyword is not found.
Parameters
----------
key : str
A keyword that may or may not be in the header.
default : optional
A default value to return if the keyword is not found in the
header.
Returns
-------
value
The value associated with the given keyword, or the default value
if the keyword is not in the header.
"""
try:
return self[key]
except (KeyError, IndexError):
return default
def set(self, keyword, value=None, comment=None, before=None, after=None):
"""
Set the value and/or comment and/or position of a specified keyword.
If the keyword does not already exist in the header, a new keyword is
created in the specified position, or appended to the end of the header
if no position is specified.
This method is similar to :meth:`Header.update` prior to Astropy v0.1.
.. note::
It should be noted that ``header.set(keyword, value)`` and
``header.set(keyword, value, comment)`` are equivalent to
``header[keyword] = value`` and
``header[keyword] = (value, comment)`` respectively.
New keywords can also be inserted relative to existing keywords
using, for example::
>>> header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
to insert before an existing keyword, or::
>>> header.insert('NAXIS', ('NAXIS1', 4096), after=True)
to insert after an existing keyword.
The only advantage of using :meth:`Header.set` is that it
easily replaces the old usage of :meth:`Header.update` both
conceptually and in terms of function signature.
Parameters
----------
keyword : str
A header keyword
value : str, optional
The value to set for the given keyword; if None the existing value
is kept, but '' may be used to set a blank value
comment : str, optional
The comment to set for the given keyword; if None the existing
comment is kept, but ``''`` may be used to set a blank comment
before : str, int, optional
Name of the keyword, or index of the `Card` before which this card
should be located in the header. The argument ``before`` takes
precedence over ``after`` if both specified.
after : str, int, optional
Name of the keyword, or index of the `Card` after which this card
should be located in the header.
"""
# Create a temporary card that looks like the one being set; if the
# temporary card turns out to be a RVKC this will make it easier to
# deal with the idiosyncrasies thereof
        # Don't try to make a temporary card though if the keyword looks like
# it might be a HIERARCH card or is otherwise invalid--this step is
# only for validating RVKCs.
if (len(keyword) <= KEYWORD_LENGTH and
Card._keywd_FSC_RE.match(keyword) and
keyword not in self._keyword_indices):
new_card = Card(keyword, value, comment)
new_keyword = new_card.keyword
else:
new_keyword = keyword
if (new_keyword not in Card._commentary_keywords and
new_keyword in self):
if comment is None:
comment = self.comments[keyword]
if value is None:
value = self[keyword]
self[keyword] = (value, comment)
if before is not None or after is not None:
card = self._cards[self._cardindex(keyword)]
self._relativeinsert(card, before=before, after=after,
replace=True)
elif before is not None or after is not None:
self._relativeinsert((keyword, value, comment), before=before,
after=after)
else:
self[keyword] = (value, comment)
def items(self):
"""Like :meth:`dict.items`."""
return list(iteritems(self))
def iteritems(self):
"""Like :meth:`dict.iteritems`."""
for card in self._cards:
yield (card.keyword, card.value)
def iterkeys(self):
"""
Like :meth:`dict.iterkeys`--iterating directly over the `Header`
instance has the same behavior.
"""
return self.__iter__()
def itervalues(self):
"""Like :meth:`dict.itervalues`."""
for _, v in iteritems(self):
yield v
def keys(self):
"""
Return a list of keywords in the header in the order they
appear--like :meth:`dict.keys` but ordered.
"""
return [keyword for keyword in self]
def pop(self, *args):
"""
Works like :meth:`list.pop` if no arguments or an index argument are
supplied; otherwise works like :meth:`dict.pop`.
"""
if len(args) > 2:
raise TypeError('Header.pop expected at most 2 arguments, got '
'{}'.format(len(args)))
if len(args) == 0:
key = -1
else:
key = args[0]
try:
value = self[key]
except (KeyError, IndexError):
if len(args) == 2:
return args[1]
raise
del self[key]
return value
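    # Illustrative usage sketch (not part of the original source; 'hdr' names
    # an assumed Header instance and 'EXPTIME' is a made-up keyword):
    #
    #     last_value = hdr.pop()            # list.pop-style: remove last card
    #     value = hdr.pop('EXPTIME', None)  # dict.pop-style, with a default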
def popitem(self):
"""Similar to :meth:`dict.popitem`."""
try:
k, v = next(iteritems(self))
except StopIteration:
raise KeyError('Header is empty')
del self[k]
return k, v
def setdefault(self, key, default=None):
"""Similar to :meth:`dict.setdefault`."""
try:
return self[key]
except (KeyError, IndexError):
self[key] = default
return default
def update(self, *args, **kwargs):
"""
Update the Header with new keyword values, updating the values of
existing keywords and appending new keywords otherwise; similar to
`dict.update`.
`update` accepts either a dict-like object or an iterable. In the
former case the keys must be header keywords and the values may be
either scalar values or (value, comment) tuples. In the case of an
iterable the items must be (keyword, value) tuples or (keyword, value,
comment) tuples.
Arbitrary arguments are also accepted, in which case the update() is
called again with the kwargs dict as its only argument. That is,
::
>>> header.update(NAXIS1=100, NAXIS2=100)
is equivalent to::
header.update({'NAXIS1': 100, 'NAXIS2': 100})
.. warning::
As this method works similarly to `dict.update` it is very
different from the ``Header.update()`` method in Astropy v0.1.
Use of the old API was
**deprecated** for a long time and is now removed. Most uses of the
old API can be replaced as follows:
* Replace ::
header.update(keyword, value)
with ::
header[keyword] = value
* Replace ::
header.update(keyword, value, comment=comment)
with ::
header[keyword] = (value, comment)
* Replace ::
header.update(keyword, value, before=before_keyword)
with ::
header.insert(before_keyword, (keyword, value))
* Replace ::
header.update(keyword, value, after=after_keyword)
with ::
header.insert(after_keyword, (keyword, value),
after=True)
See also :meth:`Header.set` which is a new method that provides an
interface similar to the old ``Header.update()`` and may help make
transition a little easier.
"""
if args:
other = args[0]
else:
other = None
def update_from_dict(k, v):
if not isinstance(v, tuple):
card = Card(k, v)
elif 0 < len(v) <= 2:
card = Card(*((k,) + v))
else:
raise ValueError(
'Header update value for key %r is invalid; the '
'value must be either a scalar, a 1-tuple '
'containing the scalar value, or a 2-tuple '
'containing the value and a comment string.' % k)
self._update(card)
if other is None:
pass
elif isinstance(other, Header):
for card in other.cards:
self._update(card)
elif hasattr(other, 'items'):
for k, v in iteritems(other):
update_from_dict(k, v)
elif hasattr(other, 'keys'):
for k in other.keys():
update_from_dict(k, other[k])
else:
for idx, card in enumerate(other):
if isinstance(card, Card):
self._update(card)
elif isinstance(card, tuple) and (1 < len(card) <= 3):
self._update(Card(*card))
else:
raise ValueError(
'Header update sequence item #{} is invalid; '
'the item must either be a 2-tuple containing '
'a keyword and value, or a 3-tuple containing '
'a keyword, value, and comment string.'.format(idx))
if kwargs:
self.update(kwargs)
def values(self):
"""Returns a list of the values of all cards in the header."""
return [v for _, v in iteritems(self)]
def append(self, card=None, useblanks=True, bottom=False, end=False):
"""
Appends a new keyword+value card to the end of the Header, similar
to `list.append`.
By default if the last cards in the Header have commentary keywords,
this will append the new keyword before the commentary (unless the new
keyword is also commentary).
Also differs from `list.append` in that it can be called with no
arguments: In this case a blank card is appended to the end of the
        Header. In this case all the keyword arguments are ignored.
Parameters
----------
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple representing a
single header card; the comment is optional in which case a
2-tuple may be used
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
bottom : bool, optional
If True, instead of appending after the last non-commentary card,
append after the last non-blank card.
end : bool, optional
If True, ignore the useblanks and bottom options, and append at the
very end of the Header.
"""
if isinstance(card, string_types):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
'The value appended to a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
if not end and card.is_blank:
# Blank cards should always just be appended to the end
end = True
if end:
self._cards.append(card)
idx = len(self._cards) - 1
else:
idx = len(self._cards) - 1
while idx >= 0 and self._cards[idx].is_blank:
idx -= 1
if not bottom and card.keyword not in Card._commentary_keywords:
while (idx >= 0 and
self._cards[idx].keyword in Card._commentary_keywords):
idx -= 1
idx += 1
self._cards.insert(idx, card)
self._updateindices(idx)
keyword = Card.normalize_keyword(card.keyword)
self._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
self._rvkc_indices[card.rawkeyword].append(idx)
if not end:
# If the appended card was a commentary card, and it was appended
# before existing cards with the same keyword, the indices for
# cards with that keyword may have changed
if not bottom and card.keyword in Card._commentary_keywords:
self._keyword_indices[keyword].sort()
            # Finally, if useblanks, delete blank cards from the end
if useblanks and self._countblanks():
                # Don't do this unless there is at least one blank card at the end
# of the header; we need to convert the card to its string
# image to see how long it is. In the vast majority of cases
# this will just be 80 (Card.length) but it may be longer for
# CONTINUE cards
self._useblanks(len(str(card)) // Card.length)
self._modified = True
def extend(self, cards, strip=True, unique=False, update=False,
update_first=False, useblanks=True, bottom=False, end=False):
"""
Appends multiple keyword+value cards to the end of the header, similar
to `list.extend`.
Parameters
----------
cards : iterable
An iterable of (keyword, value, [comment]) tuples; see
`Header.append`.
strip : bool, optional
Remove any keywords that have meaning only to specific types of
HDUs, so that only more general keywords are added from extension
Header or Card list (default: `True`).
unique : bool, optional
If `True`, ensures that no duplicate keywords are appended;
keywords already in this header are simply discarded. The
exception is commentary keywords (COMMENT, HISTORY, etc.): they are
only treated as duplicates if their values match.
update : bool, optional
If `True`, update the current header with the values and comments
from duplicate keywords in the input header. This supersedes the
``unique`` argument. Commentary keywords are treated the same as
if ``unique=True``.
update_first : bool, optional
If the first keyword in the header is 'SIMPLE', and the first
keyword in the input header is 'XTENSION', the 'SIMPLE' keyword is
replaced by the 'XTENSION' keyword. Likewise if the first keyword
in the header is 'XTENSION' and the first keyword in the input
header is 'SIMPLE', the 'XTENSION' keyword is replaced by the
'SIMPLE' keyword. This behavior is otherwise dumb as to whether or
not the resulting header is a valid primary or extension header.
This is mostly provided to support backwards compatibility with the
old ``Header.fromTxtFile`` method, and only applies if
``update=True``.
useblanks, bottom, end : bool, optional
These arguments are passed to :meth:`Header.append` while appending
new cards to the header.
"""
temp = Header(cards)
if strip:
temp._strip()
if len(self):
first = self.cards[0].keyword
else:
first = None
# We don't immediately modify the header, because first we need to sift
# out any duplicates in the new header prior to adding them to the
# existing header, but while *allowing* duplicates from the header
# being extended from (see ticket #156)
extend_cards = []
for idx, card in enumerate(temp.cards):
keyword = card.keyword
if keyword not in Card._commentary_keywords:
if unique and not update and keyword in self:
continue
elif update:
if idx == 0 and update_first:
# Dumbly update the first keyword to either SIMPLE or
                        # XTENSION as the case may be, as was the case in
# Header.fromTxtFile
if ((keyword == 'SIMPLE' and first == 'XTENSION') or
(keyword == 'XTENSION' and first == 'SIMPLE')):
del self[0]
self.insert(0, card)
else:
self[keyword] = (card.value, card.comment)
elif keyword in self:
self[keyword] = (card.value, card.comment)
else:
extend_cards.append(card)
else:
extend_cards.append(card)
else:
if (unique or update) and keyword in self:
if card.is_blank:
extend_cards.append(card)
continue
for value in self[keyword]:
if value == card.value:
break
else:
extend_cards.append(card)
else:
extend_cards.append(card)
for card in extend_cards:
self.append(card, useblanks=useblanks, bottom=bottom, end=end)
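    # Illustrative usage sketch (not part of the original source; 'hdr' and
    # 'other' name assumed Header instances):
    #
    #     hdr.extend(other, unique=True)   # keep hdr's values on collisions
    #     hdr.extend(other, update=True)   # take other's values on collisions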
def count(self, keyword):
"""
Returns the count of the given keyword in the header, similar to
`list.count` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword to count instances of in the header
"""
keyword = Card.normalize_keyword(keyword)
# We have to look before we leap, since otherwise _keyword_indices,
# being a defaultdict, will create an entry for the nonexistent keyword
if keyword not in self._keyword_indices:
raise KeyError("Keyword {!r} not found.".format(keyword))
return len(self._keyword_indices[keyword])
def index(self, keyword, start=None, stop=None):
"""
        Returns the index of the first instance of the given keyword in the
header, similar to `list.index` if the Header object is treated as a
list of keywords.
Parameters
----------
keyword : str
The keyword to look up in the list of all keywords in the header
start : int, optional
The lower bound for the index
stop : int, optional
The upper bound for the index
"""
if start is None:
start = 0
if stop is None:
stop = len(self._cards)
if stop < start:
step = -1
else:
step = 1
norm_keyword = Card.normalize_keyword(keyword)
for idx in range(start, stop, step):
if self._cards[idx].keyword.upper() == norm_keyword:
return idx
else:
            raise ValueError('The keyword {!r} is not in the '
                             'header.'.format(keyword))
def insert(self, key, card, useblanks=True, after=False):
"""
Inserts a new keyword+value card into the Header at a given location,
similar to `list.insert`.
Parameters
----------
key : int, str, or tuple
The index into the list of header keywords before which the
new keyword should be inserted, or the name of a keyword before
which the new keyword should be inserted. Can also accept a
(keyword, index) tuple for inserting around duplicate keywords.
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple; see
`Header.append`
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
after : bool, optional
If set to `True`, insert *after* the specified index or keyword,
rather than before it. Defaults to `False`.
"""
if not isinstance(key, int):
# Don't pass through ints to _cardindex because it will not take
# kindly to indices outside the existing number of cards in the
# header, which insert needs to be able to support (for example
# when inserting into empty headers)
idx = self._cardindex(key)
else:
idx = key
if after:
if idx == -1:
idx = len(self._cards)
else:
idx += 1
if idx >= len(self._cards):
# This is just an append (Though it must be an append absolutely to
# the bottom, ignoring blanks, etc.--the point of the insert method
# is that you get exactly what you asked for with no surprises)
self.append(card, end=True)
return
if isinstance(card, string_types):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
'The value inserted into a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
self._cards.insert(idx, card)
keyword = card.keyword
# If idx was < 0, determine the actual index according to the rules
# used by list.insert()
if idx < 0:
idx += len(self._cards) - 1
if idx < 0:
idx = 0
# All the keyword indices above the insertion point must be updated
self._updateindices(idx)
keyword = Card.normalize_keyword(keyword)
self._keyword_indices[keyword].append(idx)
count = len(self._keyword_indices[keyword])
if count > 1:
# There were already keywords with this same name
if keyword not in Card._commentary_keywords:
warnings.warn(
'A {!r} keyword already exists in this header. Inserting '
'duplicate keyword.'.format(keyword), AstropyUserWarning)
self._keyword_indices[keyword].sort()
if card.field_specifier is not None:
# Update the index of RVKC as well
rvkc_indices = self._rvkc_indices[card.rawkeyword]
rvkc_indices.append(idx)
rvkc_indices.sort()
if useblanks:
self._useblanks(len(str(card)) // Card.length)
self._modified = True
def remove(self, keyword, ignore_missing=False, remove_all=False):
"""
Removes the first instance of the given keyword from the header similar
to `list.remove` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword of which to remove the first instance in the header.
ignore_missing : bool, optional
When True, ignores missing keywords. Otherwise, if the keyword
is not present in the header a KeyError is raised.
remove_all : bool, optional
When True, all instances of keyword will be removed.
Otherwise only the first instance of the given keyword is removed.
"""
keyword = Card.normalize_keyword(keyword)
if keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
if remove_all:
while keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
elif not ignore_missing:
raise KeyError("Keyword '{}' not found.".format(keyword))
def rename_keyword(self, oldkeyword, newkeyword, force=False):
"""
Rename a card's keyword in the header.
Parameters
----------
oldkeyword : str or int
Old keyword or card index
newkeyword : str
New keyword
force : bool, optional
When `True`, if the new keyword already exists in the header, force
the creation of a duplicate keyword. Otherwise a
`ValueError` is raised.
"""
oldkeyword = Card.normalize_keyword(oldkeyword)
newkeyword = Card.normalize_keyword(newkeyword)
if newkeyword == 'CONTINUE':
raise ValueError('Can not rename to CONTINUE')
if (newkeyword in Card._commentary_keywords or
oldkeyword in Card._commentary_keywords):
if not (newkeyword in Card._commentary_keywords and
oldkeyword in Card._commentary_keywords):
raise ValueError('Regular and commentary keys can not be '
'renamed to each other.')
elif not force and newkeyword in self:
raise ValueError('Intended keyword {} already exists in header.'
.format(newkeyword))
idx = self.index(oldkeyword)
card = self.cards[idx]
del self[idx]
self.insert(idx, (newkeyword, card.value, card.comment))
def add_history(self, value, before=None, after=None):
"""
Add a ``HISTORY`` card.
Parameters
----------
value : str
History text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary('HISTORY', value, before=before, after=after)
def add_comment(self, value, before=None, after=None):
"""
Add a ``COMMENT`` card.
Parameters
----------
value : str
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary('COMMENT', value, before=before, after=after)
def add_blank(self, value='', before=None, after=None):
"""
Add a blank card.
Parameters
----------
value : str, optional
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary('', value, before=before, after=after)
def _update(self, card):
"""
The real update code. If keyword already exists, its value and/or
comment will be updated. Otherwise a new card will be appended.
This will not create a duplicate keyword except in the case of
commentary cards. The only other way to force creation of a duplicate
is to use the insert(), append(), or extend() methods.
"""
keyword, value, comment = card
# Lookups for existing/known keywords are case-insensitive
keyword = keyword.upper()
if keyword.startswith('HIERARCH '):
keyword = keyword[9:]
if (keyword not in Card._commentary_keywords and
keyword in self._keyword_indices):
# Easy; just update the value/comment
idx = self._keyword_indices[keyword][0]
existing_card = self._cards[idx]
existing_card.value = value
if comment is not None:
# '' should be used to explicitly blank a comment
existing_card.comment = comment
if existing_card._modified:
self._modified = True
elif keyword in Card._commentary_keywords:
cards = self._splitcommentary(keyword, value)
if keyword in self._keyword_indices:
# Append after the last keyword of the same type
idx = self.index(keyword, start=len(self) - 1, stop=-1)
isblank = not (keyword or value or comment)
for c in reversed(cards):
self.insert(idx + 1, c, useblanks=(not isblank))
else:
for c in cards:
self.append(c, bottom=True)
else:
# A new keyword! self.append() will handle updating _modified
self.append(card)
def _cardindex(self, key):
"""Returns an index into the ._cards list given a valid lookup key."""
# This used to just set key = (key, 0) and then go on to act as if the
# user passed in a tuple, but it's much more common to just be given a
# string as the key, so optimize more for that case
if isinstance(key, string_types):
keyword = key
n = 0
elif isinstance(key, int):
# If < 0, determine the actual index
if key < 0:
key += len(self._cards)
if key < 0 or key >= len(self._cards):
raise IndexError('Header index out of range.')
return key
elif isinstance(key, slice):
return key
elif isinstance(key, tuple):
if (len(key) != 2 or not isinstance(key[0], string_types) or
not isinstance(key[1], int)):
raise ValueError(
'Tuple indices must be 2-tuples consisting of a '
'keyword string and an integer index.')
keyword, n = key
else:
raise ValueError(
'Header indices must be either a string, a 2-tuple, or '
'an integer.')
keyword = Card.normalize_keyword(keyword)
# Returns the index into _cards for the n-th card with the given
# keyword (where n is 0-based)
indices = self._keyword_indices.get(keyword, None)
if keyword and not indices:
if len(keyword) > KEYWORD_LENGTH or '.' in keyword:
raise KeyError("Keyword {!r} not found.".format(keyword))
else:
# Maybe it's a RVKC?
indices = self._rvkc_indices.get(keyword, None)
if not indices:
raise KeyError("Keyword {!r} not found.".format(keyword))
try:
return indices[n]
except IndexError:
raise IndexError('There are only {} {!r} cards in the '
'header.'.format(len(indices), keyword))
def _keyword_from_index(self, idx):
"""
Given an integer index, return the (keyword, repeat) tuple that index
refers to. For most keywords the repeat will always be zero, but it
may be greater than zero for keywords that are duplicated (especially
commentary keywords).
In a sense this is the inverse of self.index, except that it also
supports duplicates.
"""
if idx < 0:
idx += len(self._cards)
keyword = self._cards[idx].keyword
keyword = Card.normalize_keyword(keyword)
repeat = self._keyword_indices[keyword].index(idx)
return keyword, repeat
def _relativeinsert(self, card, before=None, after=None, replace=False):
"""
Inserts a new card before or after an existing card; used to
implement support for the legacy before/after keyword arguments to
Header.update().
If replace=True, move an existing card with the same keyword.
"""
if before is None:
insertionkey = after
else:
insertionkey = before
def get_insertion_idx():
if not (isinstance(insertionkey, int) and
insertionkey >= len(self._cards)):
idx = self._cardindex(insertionkey)
else:
idx = insertionkey
if before is None:
idx += 1
return idx
if replace:
# The card presumably already exists somewhere in the header.
# Check whether or not we actually have to move it; if it does need
# to be moved we just delete it and then it will be reinserted
# below
old_idx = self._cardindex(card.keyword)
insertion_idx = get_insertion_idx()
if (insertion_idx >= len(self._cards) and
old_idx == len(self._cards) - 1):
# The card would be appended to the end, but it's already at
# the end
return
if before is not None:
if old_idx == insertion_idx - 1:
return
elif after is not None and old_idx == insertion_idx:
return
del self[old_idx]
# Even if replace=True, the insertion idx may have changed since the
# old card was deleted
idx = get_insertion_idx()
if card[0] in Card._commentary_keywords:
cards = reversed(self._splitcommentary(card[0], card[1]))
else:
cards = [card]
for c in cards:
self.insert(idx, c)
def _updateindices(self, idx, increment=True):
"""
        For all cards with index above idx, increment or decrement their index
        values in the keyword_indices dict.
"""
if idx > len(self._cards):
# Save us some effort
return
increment = 1 if increment else -1
for index_sets in (self._keyword_indices, self._rvkc_indices):
for indices in itervalues(index_sets):
for jdx, keyword_index in enumerate(indices):
if keyword_index >= idx:
indices[jdx] += increment
def _countblanks(self):
"""Returns the number of blank cards at the end of the Header."""
for idx in range(1, len(self._cards)):
if not self._cards[-idx].is_blank:
return idx - 1
return 0
def _useblanks(self, count):
for _ in range(count):
if self._cards[-1].is_blank:
del self[-1]
else:
break
def _haswildcard(self, keyword):
"""Return `True` if the input keyword contains a wildcard pattern."""
return (isinstance(keyword, string_types) and
(keyword.endswith('...') or '*' in keyword or '?' in keyword))
def _wildcardmatch(self, pattern):
"""
Returns a list of indices of the cards matching the given wildcard
pattern.
* '*' matches 0 or more characters
* '?' matches a single character
* '...' matches 0 or more of any non-whitespace character
"""
pattern = pattern.replace('*', r'.*').replace('?', r'.')
pattern = pattern.replace('...', r'\S*') + '$'
pattern_re = re.compile(pattern, re.I)
return [idx for idx, card in enumerate(self._cards)
if pattern_re.match(card.keyword)]
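    # Illustrative sketch of the wildcard syntax (not part of the original
    # source; the keyword names are made up). With cards NAXIS, NAXIS1 and
    # NAXIS2 present:
    #
    #     self._wildcardmatch('NAXIS*')   # matches all three cards
    #     self._wildcardmatch('NAXIS?')   # matches only NAXIS1 and NAXIS2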
def _set_slice(self, key, value, target):
"""
Used to implement Header.__setitem__ and CardAccessor.__setitem__.
"""
if isinstance(key, slice) or self._haswildcard(key):
if isinstance(key, slice):
indices = range(*key.indices(len(target)))
else:
indices = self._wildcardmatch(key)
if isinstance(value, string_types) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
target[idx] = val
return True
return False
def _splitcommentary(self, keyword, value):
"""
Given a commentary keyword and value, returns a list of the one or more
cards needed to represent the full value. This is primarily used to
create the multiple commentary cards needed to represent a long value
that won't fit into a single commentary card.
"""
# The maximum value in each card can be the maximum card length minus
        # the maximum keyword length (which can include spaces if the keyword
        # length is less than 8)
maxlen = Card.length - KEYWORD_LENGTH
valuestr = str(value)
if len(valuestr) <= maxlen:
# The value can fit in a single card
cards = [Card(keyword, value)]
else:
# The value must be split across multiple consecutive commentary
# cards
idx = 0
cards = []
while idx < len(valuestr):
cards.append(Card(keyword, valuestr[idx:idx + maxlen]))
idx += maxlen
return cards
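    # Illustrative sketch (not part of the original source): a commentary value
    # longer than Card.length - KEYWORD_LENGTH (72) characters is split across
    # consecutive cards, e.g.
    #
    #     cards = self._splitcommentary('HISTORY', 'x' * 150)
    #     # len(cards) == 3; each card holds at most 72 characters of the value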
def _strip(self):
"""
Strip cards specific to a certain kind of header.
Strip cards like ``SIMPLE``, ``BITPIX``, etc. so the rest of
the header can be used to reconstruct another kind of header.
"""
# TODO: Previously this only deleted some cards specific to an HDU if
# _hdutype matched that type. But it seemed simple enough to just
# delete all desired cards anyways, and just ignore the KeyErrors if
# they don't exist.
# However, it might be desirable to make this extendable somehow--have
# a way for HDU classes to specify some headers that are specific only
# to that type, and should be removed otherwise.
if 'NAXIS' in self:
naxis = self['NAXIS']
else:
naxis = 0
if 'TFIELDS' in self:
tfields = self['TFIELDS']
else:
tfields = 0
for idx in range(naxis):
try:
del self['NAXIS' + str(idx + 1)]
except KeyError:
pass
for name in ('TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE',
'TUNIT', 'TDISP', 'TDIM', 'THEAP', 'TBCOL'):
for idx in range(tfields):
try:
del self[name + str(idx + 1)]
except KeyError:
pass
for name in ('SIMPLE', 'XTENSION', 'BITPIX', 'NAXIS', 'EXTEND',
'PCOUNT', 'GCOUNT', 'GROUPS', 'BSCALE', 'BZERO',
'TFIELDS'):
try:
del self[name]
except KeyError:
pass
def _add_commentary(self, key, value, before=None, after=None):
"""
Add a commentary card.
If ``before`` and ``after`` are `None`, add to the last occurrence
of cards of the same name (except blank card). If there is no
card (or blank card), append at the end.
"""
if before is not None or after is not None:
self._relativeinsert((key, value), before=before,
after=after)
else:
self[key] = value
# Some fixes for compatibility with the Python 3 dict interface, where
# iteritems -> items, etc.
if not six.PY2:
keys = iterkeys
values = itervalues
items = iteritems
del iterkeys
del itervalues
del iteritems
collections.MutableSequence.register(Header)
collections.MutableMapping.register(Header)
class _CardAccessor(object):
"""
This is a generic class for wrapping a Header in such a way that you can
use the header's slice/filtering capabilities to return a subset of cards
and do something with them.
This is sort of the opposite notion of the old CardList class--whereas
Header used to use CardList to get lists of cards, this uses Header to get
lists of cards.
"""
# TODO: Consider giving this dict/list methods like Header itself
def __init__(self, header):
self._header = header
def __repr__(self):
return '\n'.join(repr(c) for c in self._header._cards)
def __len__(self):
return len(self._header._cards)
def __iter__(self):
return iter(self._header._cards)
def __eq__(self, other):
# If the `other` item is a scalar we will still treat it as equal if
# this _CardAccessor only contains one item
if not isiterable(other) or isinstance(other, string_types):
if len(self) == 1:
other = [other]
else:
return False
for a, b in zip_longest(self, other):
if a != b:
return False
else:
return True
def __ne__(self, other):
return not (self == other)
def __getitem__(self, item):
if isinstance(item, slice) or self._header._haswildcard(item):
return self.__class__(self._header[item])
idx = self._header._cardindex(item)
return self._header._cards[idx]
def _setslice(self, item, value):
"""
Helper for implementing __setitem__ on _CardAccessor subclasses; slices
should always be handled in this same way.
"""
if isinstance(item, slice) or self._header._haswildcard(item):
if isinstance(item, slice):
indices = range(*item.indices(len(self)))
else:
indices = self._header._wildcardmatch(item)
if isinstance(value, string_types) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
self[idx] = val
return True
return False
collections.Mapping.register(_CardAccessor)
collections.Sequence.register(_CardAccessor)
class _HeaderComments(_CardAccessor):
"""
A class used internally by the Header class for the Header.comments
attribute access.
This object can be used to display all the keyword comments in the Header,
or look up the comments on specific keywords. It allows all the same forms
of keyword lookup as the Header class itself, but returns comments instead
of values.
"""
def __iter__(self):
for card in self._header._cards:
yield card.comment
def __repr__(self):
"""Returns a simple list of all keywords and their comments."""
keyword_length = KEYWORD_LENGTH
for card in self._header._cards:
keyword_length = max(keyword_length, len(card.keyword))
return '\n'.join('{:>{len}} {}'.format(c.keyword, c.comment,
len=keyword_length)
for c in self._header._cards)
def __getitem__(self, item):
"""
Slices and filter strings return a new _HeaderComments containing the
returned cards. Otherwise the comment of a single card is returned.
"""
item = super(_HeaderComments, self).__getitem__(item)
if isinstance(item, _HeaderComments):
# The item key was a slice
return item
return item.comment
def __setitem__(self, item, comment):
"""
Set/update the comment on specified card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, comment, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
idx = self._header._cardindex(item)
value = self._header[idx]
self._header[idx] = (value, comment)
class _HeaderCommentaryCards(_CardAccessor):
"""
This is used to return a list-like sequence over all the values in the
header for a given commentary keyword, such as HISTORY.
"""
def __init__(self, header, keyword=''):
super(_HeaderCommentaryCards, self).__init__(header)
self._keyword = keyword
self._count = self._header.count(self._keyword)
self._indices = slice(self._count).indices(self._count)
# __len__ and __iter__ need to be overridden from the base class due to the
# different approach this class has to take for slicing
def __len__(self):
return len(range(*self._indices))
def __iter__(self):
for idx in range(*self._indices):
yield self._header[(self._keyword, idx)]
def __repr__(self):
return '\n'.join(self)
def __getitem__(self, idx):
if isinstance(idx, slice):
n = self.__class__(self._header, self._keyword)
n._indices = idx.indices(self._count)
return n
elif not isinstance(idx, int):
raise ValueError('{} index must be an integer'.format(self._keyword))
idx = list(range(*self._indices))[idx]
return self._header[(self._keyword, idx)]
def __setitem__(self, item, value):
"""
Set the value of a specified commentary card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, value, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
self._header[(self._keyword, item)] = value
def _block_size(sep):
"""
Determine the size of a FITS header block if a non-blank separator is used
between cards.
"""
return BLOCK_SIZE + (len(sep) * (BLOCK_SIZE // Card.length - 1))
def _pad_length(stringlen):
"""Bytes needed to pad the input stringlen to the next FITS block."""
return (BLOCK_SIZE - (stringlen % BLOCK_SIZE)) % BLOCK_SIZE
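# Illustrative sketch (not part of the original source), assuming the standard
# BLOCK_SIZE of 2880 and Card.length of 80:
#
#     _block_size('')     # -> 2880
#     _block_size('\n')   # -> 2880 + 1 * (2880 // 80 - 1) = 2915
#     _pad_length(100)    # -> 2780 bytes of padding to reach the next block
#     _pad_length(2880)   # -> 0, already block-aligned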
|
the-stack_0_20278 | """Support for Home Assistant iOS app sensors."""
from homeassistant.components import ios
from homeassistant.const import UNIT_PERCENTAGE
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
SENSOR_TYPES = {
"level": ["Battery Level", UNIT_PERCENTAGE],
"state": ["Battery State", None],
}
DEFAULT_ICON_LEVEL = "mdi:battery"
DEFAULT_ICON_STATE = "mdi:power-plug"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the iOS sensor."""
# Leave here for if someone accidentally adds platform: ios to config
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up iOS from a config entry."""
dev = []
for device_name, device in ios.devices(hass).items():
for sensor_type in ("level", "state"):
dev.append(IOSSensor(sensor_type, device_name, device))
async_add_entities(dev, True)
class IOSSensor(Entity):
"""Representation of an iOS sensor."""
def __init__(self, sensor_type, device_name, device):
"""Initialize the sensor."""
self._device_name = device_name
self._name = f"{device_name} {SENSOR_TYPES[sensor_type][0]}"
self._device = device
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def device_info(self):
"""Return information about the device."""
return {
"identifiers": {
(
ios.DOMAIN,
self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_PERMANENT_ID],
)
},
"name": self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_NAME],
"manufacturer": "Apple",
"model": self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_TYPE],
"sw_version": self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_SYSTEM_VERSION],
}
@property
def name(self):
"""Return the name of the iOS sensor."""
device_name = self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_NAME]
return f"{device_name} {SENSOR_TYPES[self.type][0]}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unique_id(self):
"""Return the unique ID of this sensor."""
device_id = self._device[ios.ATTR_DEVICE_ID]
return f"{self.type}_{device_id}"
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the device state attributes."""
device = self._device[ios.ATTR_DEVICE]
device_battery = self._device[ios.ATTR_BATTERY]
return {
"Battery State": device_battery[ios.ATTR_BATTERY_STATE],
"Battery Level": device_battery[ios.ATTR_BATTERY_LEVEL],
"Device Type": device[ios.ATTR_DEVICE_TYPE],
"Device Name": device[ios.ATTR_DEVICE_NAME],
"Device Version": device[ios.ATTR_DEVICE_SYSTEM_VERSION],
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
device_battery = self._device[ios.ATTR_BATTERY]
battery_state = device_battery[ios.ATTR_BATTERY_STATE]
battery_level = device_battery[ios.ATTR_BATTERY_LEVEL]
charging = True
icon_state = DEFAULT_ICON_STATE
if battery_state in (
ios.ATTR_BATTERY_STATE_FULL,
ios.ATTR_BATTERY_STATE_UNPLUGGED,
):
charging = False
icon_state = f"{DEFAULT_ICON_STATE}-off"
elif battery_state == ios.ATTR_BATTERY_STATE_UNKNOWN:
battery_level = None
charging = False
icon_state = f"{DEFAULT_ICON_LEVEL}-unknown"
if self.type == "state":
return icon_state
return icon_for_battery_level(battery_level=battery_level, charging=charging)
def update(self):
"""Get the latest state of the sensor."""
self._device = ios.devices(self.hass).get(self._device_name)
self._state = self._device[ios.ATTR_BATTERY][self.type]
|
the-stack_0_20279 | import math
import heapq # for retrieval topK
import multiprocessing
import numpy as np
from time import time
import paddle.fluid as fluid
import os
from gmf import GMF
from mlp import MLP
from neumf import NeuMF
from Dataset import Dataset
import logging
import paddle
import args
import utils
import time
# Global variables that are shared across processes
_model = None
_testRatings = None
_testNegatives = None
_K = None
_args = None
_model_path = None
def run_infer(args, model_path, test_data_path):
test_data_generator = utils.CriteoDataset()
with fluid.scope_guard(fluid.Scope()):
test_reader = fluid.io.batch(
test_data_generator.test(test_data_path, False),
batch_size=args.test_batch_size)
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(
model_path, exe)
for data in test_reader():
user_input = np.array([dat[0] for dat in data])
item_input = np.array([dat[1] for dat in data])
pred_val = exe.run(
infer_program,
feed={"user_input": user_input,
"item_input": item_input},
fetch_list=fetch_vars,
return_numpy=True)
return pred_val[0].reshape(1, -1).tolist()[0]
def evaluate_model(args, testRatings, testNegatives, K, model_path):
"""
Evaluate the performance (Hit_Ratio, NDCG) of top-K recommendation
Return: score of each test rating.
"""
global _model
global _testRatings
global _testNegatives
global _K
global _model_path
global _args
_args = args
_model_path = model_path
_testRatings = testRatings
_testNegatives = testNegatives
_K = K
hits, ndcgs = [], []
for idx in range(len(_testRatings)):
(hr, ndcg) = eval_one_rating(idx)
hits.append(hr)
ndcgs.append(ndcg)
return (hits, ndcgs)
def eval_one_rating(idx):
rating = _testRatings[idx]
items = _testNegatives[idx]
u = rating[0]
gtItem = rating[1]
items.append(gtItem)
# Get prediction scores
map_item_score = {}
users = np.full(len(items), u, dtype='int32')
users = users.reshape(-1, 1)
items_array = np.array(items).reshape(-1, 1)
temp = np.hstack((users, items_array))
np.savetxt("Data/test.txt", temp, fmt='%d', delimiter=',')
predictions = run_infer(_args, _model_path, _args.test_data_path)
for i in range(len(items)):
item = items[i]
map_item_score[item] = predictions[i]
items.pop()
# Evaluate top rank list
ranklist = heapq.nlargest(_K, map_item_score, key=map_item_score.get)
hr = getHitRatio(ranklist, gtItem)
ndcg = getNDCG(ranklist, gtItem)
return (hr, ndcg)
def getHitRatio(ranklist, gtItem):
for item in ranklist:
if item == gtItem:
return 1
return 0
def getNDCG(ranklist, gtItem):
for i in range(len(ranklist)):
item = ranklist[i]
if item == gtItem:
return math.log(2) / math.log(i + 2)
return 0
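# Illustrative sketch (not part of the original source; the item ids are made
# up): HR is 1 if the ground-truth item appears anywhere in the top-K list,
# while NDCG additionally rewards it for ranking near the top.
#
#     ranklist = [12, 7, 42, 3, 9]
#     getHitRatio(ranklist, 42)   # -> 1
#     getNDCG(ranklist, 42)       # -> log(2) / log(4) = 0.5
#     getHitRatio(ranklist, 99)   # -> 0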
|
the-stack_0_20281 | #!/usr/bin/env python3
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os
import shutil
import glob
def clean():
to_delete = [
'session_stats',
'libtorrent_logs*',
'round_trip_ms.log',
'dht.log',
'upnp.log',
'natpmp.log',
'bin',
'build-aux',
'.deps',
'test_tmp_*',
'bjam_build.*.xml',
'*.exe',
'*.pdb',
'*.pyd',
'dist',
'build',
'.libs',
'*.cpp.orig',
'*.cpp.rej',
'*.hpp.orig',
'*.hpp.rej',
'*.gcov',
'*.gcno',
'*.gcda',
'lib*.a',
'Jamfile.rej',
'Jamfile.orig',
'*.o',
'*.lo',
'autom4te.cache',
'configure',
'config.report',
'config.log',
'.lib',
'CMakeFiles',
'CMakeCache.txt',
'checking_benchmark',
'cpu_benchmark',
]
directories = [
'examples',
'test',
'.',
'tools',
'src',
'simulation',
'fuzzers',
os.path.join('src', 'kademlia'),
os.path.join('include', 'libtorrent'),
os.path.join('include', os.path.join('libtorrent', '_aux')),
os.path.join('include', os.path.join('libtorrent', 'kademlia')),
os.path.join('bindings', 'python'),
os.path.join('bindings', os.path.join('python', 'src')),
os.path.join('bindings', 'c'),
os.path.join('bindings', os.path.join('c', 'src')),
os.path.join('simulation', 'libsimulator')
]
for d in directories:
for f in to_delete:
path = os.path.join(d, f)
entries = glob.glob(path)
for p in entries:
try:
shutil.rmtree(p)
print(p)
except Exception as e:
print(p, e)
try:
os.remove(p)
print(p)
except Exception as e:
print(p, e)
if __name__ == "__main__":
clean()
|
the-stack_0_20282 | import random
import uuid
class ChatClient(object):
def __init__(self, conn=None, addr=None):
self.id = str(uuid.uuid4())
self.nick = 'user_{}'.format(random.random())
self.conn = conn
self.addr = addr
|
the-stack_0_20284 | from django.db.models import Q
from .pagination import PostLimitOffsetPagination, PostPageNumberPagination
from django.contrib.auth import get_user_model
from ...payment.models import MpesaPayment
from .serializers import (
UserTransactionSerializer,
UserAuthorizationSerializer,
TerminalListSerializer
)
from rest_framework import generics
from rest_framework.response import Response
from django.contrib import auth
from ...decorators import user_trail
from ...sale.models import Terminal
import logging
from rest_framework.decorators import api_view
from rest_framework import status
User = get_user_model()
debug_logger = logging.getLogger('debug_logger')
info_logger = logging.getLogger('info_logger')
error_logger = logging.getLogger('error_logger')
@api_view(['GET', 'POST'])
def login(request):
serializer = UserAuthorizationSerializer(data=request.data)
if request.method == 'POST':
if serializer.is_valid():
password = serializer.data['password']
username = serializer.data['email']
try:
terminal = serializer.data['terminal']
except:
terminal = 'Terminal not set'
if '@' in username:
kwargs = {'email': username}
else:
kwargs = {'name': username}
try:
user = get_user_model().objects.get(**kwargs)
if user.check_password(password) and user.has_perm('sale.add_drawercash') and user.has_perm('sale.change_drawercash'):
record_trail(request.user.name,user,terminal)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
else:
return Response({'message':'Permission Denied!'}, status=status.HTTP_401_UNAUTHORIZED)
except:
return Response(serializer.errors, status=status.HTTP_403_FORBIDDEN)
else:
return Response(serializer.errors, status=status.HTTP_403_FORBIDDEN)
elif request.method == 'GET':
return Response(status=status.HTTP_400_BAD_REQUEST)
def record_trail(loggedin,user,terminal):
trail = str(user.name)+' '+\
str(user.email)+' logged in Termial:'+\
str(terminal)+'. Session active '+str(loggedin)
user_trail(user,trail,'view')
@api_view(['GET', 'POST'])
def logout(request):
auth.logout(request)
return Response({
'users': "User logged out successfully"})
class UserAuthorizationAPIView(generics.CreateAPIView):
"""docstring for UserAuthorizationAPIView"""
serializer_class = UserAuthorizationSerializer
class UserTransactionAPIView(generics.CreateAPIView,):
serializer_class = UserTransactionSerializer
def perform_create(self, serializer):
serializer.save(user=self.request.user)
user_trail(self.request.user, 'Drawer Cash:#'+str(serializer.data['amount'])+' added ','add')
info_logger.info('User: '+str(self.request.user)+' Drawer Cash:'+str(serializer.data['amount']))
class TerminalListAPIView(generics.ListAPIView):
pagination_class = PostLimitOffsetPagination
serializer_class = TerminalListSerializer
def get_queryset(self, *args, **kwargs):
queryset_list = Terminal.objects.all()
query = self.request.GET.get('q')
if query:
queryset_list = queryset_list.filter(
Q(terminal_name__icontains=query)|
Q(terminal_number__icontains=query)
).order_by('-id')
return queryset_list
|
the-stack_0_20285 | ########################################################################################
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
########################################################################################
# Central configuration file for a branch, should never be integrated since it is unique for each branch
import json
import os
import re
########################################################################################################################
# Global constants
########################################################################################################################
BINTEMP_FOLDER = 'BinTemp' # Name of the of working build folder that will be created at the root folder
WAF_FILE_GLOB_WARNING_THRESHOLD = 1000 # Define a warning threshold in the number of files that were hit during a waf_file call
CACHE_FOLDER = 'Cache'
# Version stamp (GUID) of lmbrwaf that is used to signal that a clean of bintemp is necessary
# Only update this number if there are changes in WAF handling where it is not possible
# to track stale intermediate files caused by the waf changes. To ignore the bintemp
# cleaning check, set this value to None.
#
# Note: Only update this value as a last resort. If there were WAF changes that do not affect the generation or
# tracking of intermediate of generated files, then there is no need to wipe out BinTemp
LMBR_WAF_VERSION_TAG = "E7A426A7-350D-4B4A-A619-ED1DA4463DCA"
# Optional additional table of copyrights.
# To add a company specific copyright, add a name value pair below to define the desired copyright statement for
# generated binaries and add the 'copyright_org' in your wscript definition
#
# e.g.
#
# ADDITIONAL_COPYRIGHT_TABLE = {
# 'MyCompany' : 'Copyright (c) MyCompany'
# }
#
# and in your module definition
#
# bld.CryEngineModule(
# ...
# copyright_org = 'MyCompany'
# ...
# )
########################################################################################################################
ADDITIONAL_COPYRIGHT_TABLE = {
}
########################################################################################################################
# Optional table of additional modules that will be loaded by WAF
#
# The table format is:
#
# <Key: Path of the directory for a set of waf modules> :
# [ < List of WAF python modules to load into the WAF build system, relative to the path directory from the key > ]
#
# For each of the modules in the python module list, they represent the relative full filename of the module to load.
# To restrict modules to only load for a specific host platform
#
# e.g.
#
# ADDITIONAL_WAF_MODULES = {
# 'Tools/Build/custom_build' : [
# 'custom_a.py',
# 'custom_b.py:win32',
# 'custom_c.py:darwin'
# ]
# }
#
# The above example will load 'custom_a.py' for all platforms, 'custom_b.py' for only win32 platforms, and 'custom_c.py'
# for only darwin platforms
#
# Note: The methods that are to be exposed in the modules must be decorated accordingly, as they are generally used
# based on the context of the command, and not through regular python imports
########################################################################################################################
ADDITIONAL_WAF_MODULES = {
}
########################################################################################################################
# Lumberyard version and build number information.
# The following section extrapolates the version number from the engine configuration file and is used to embed the
# value into the built binaries were applicable.
########################################################################################################################
LUMBERYARD_ENGINE_VERSION_CONFIG_FILENAME = 'engine.json'
SCRIPT_PATH = os.path.dirname(__file__)
with open(os.path.join(SCRIPT_PATH, LUMBERYARD_ENGINE_VERSION_CONFIG_FILENAME)) as ENGINE_FILE:
ENGINE_JSON_DATA = json.load(ENGINE_FILE)
LUMBERYARD_VERSION = ENGINE_JSON_DATA.get('LumberyardVersion', '0.0.0.0')
LUMBERYARD_COPYRIGHT_YEAR = ENGINE_JSON_DATA.get('LumberyardCopyrightYear', 2017)
LUMBERYARD_BUILD = 1088666
LUMBERYARD_ENGINE_PATH = os.path.abspath(ENGINE_JSON_DATA.get('ExternalEnginePath', '.'))
# validate the Lumberyard version string above
VERSION_NUMBER_PATTERN = re.compile("^(\.?\d+)*$")
if VERSION_NUMBER_PATTERN.match(LUMBERYARD_VERSION) is None:
raise ValueError('Invalid version string for the Lumberyard Version ({})'.format(LUMBERYARD_VERSION))
BINTEMP_CACHE_3RD_PARTY = '__cache_3p__'
BINTEMP_CACHE_TOOLS = '__cache_tools_'
BINTEMP_MODULE_DEF = 'module_def'
########################################################################################################################
# Additional paths to search for the WAF build. Can use alias' such as @ENGINE@ and @PROJECT@ for the engine and project
# roots, respectively
########################################################################################################################
ADDITIONAL_SEARCH_PATHS = [
]
|
the-stack_0_20286 | from pathlib import Path
from setuptools import find_packages, setup
requirements = [
'opencv-python',
'av',
'ffmpeg',
'websockets',
]
extras_require = {
'test': ['pytest'],
}
setup(
name='psivideo',
author='psivideo development team',
install_requires=requirements,
extras_require=extras_require,
packages=find_packages(),
include_package_data=True,
license='LICENSE.txt',
    description='Video tools supporting psiexperiment',
entry_points={
'console_scripts': [
'psivideo=psivideo.main:main',
],
},
#version=version,
)
|
the-stack_0_20287 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DCC model
=========
"""
from __future__ import print_function, division
import numpy as np
from numba import jit, float64, void
__all__ = ['dcc_recursion_python', 'dcc_recursion_numba',
'corr_dcc_python','corr_dcc_numba']
def dcc_recursion_python(qmat, const, data, neg_data, param):
"""DCC recursion.
Parameters
----------
qmat : (nobs, ndim, ndim) array
Raw correlation matrix
const : (ndim, ndim) array
Constant
data : (nobs, ndim) array
Innovations
neg_data : (nobs, ndim) array
Only negative innovations
param : (3,) array
        DCC parameters
"""
acorr, bcorr, dcorr = param
nobs = data.shape[0]
for t in range(1, nobs):
qmat[t] = const \
+ acorr * data[t-1][:, np.newaxis] * data[t-1] \
+ bcorr * qmat[t-1] \
+ dcorr * neg_data[t-1][:, np.newaxis] * neg_data[t-1]
@jit(void(float64[:, :, :], float64[:, :], float64[:, :],
float64[:, :], float64[:]), nopython=True, nogil=True, cache=True)
def dcc_recursion_numba(qmat, const, data, neg_data, param):
"""DCC recursion.
Parameters
----------
qmat : (nobs, ndim, ndim) array
Raw correlation matrix
const : (ndim, ndim) array
Constant
data : (nobs, ndim) array
Innovations
neg_data : (nobs, ndim) array
Only negative innovations
param : (3,) array
        DCC parameters
"""
acorr, bcorr, dcorr = param
nobs, ndim = data.shape
for t in range(1, nobs):
for i in range(ndim):
for j in range(ndim):
qmat[t, i, j] = const[i, j] \
+ acorr * data[t-1, i] * data[t-1, j] \
+ bcorr * qmat[t-1, i, j] \
+ dcorr * neg_data[t-1, i] * neg_data[t-1, j]
def corr_dcc_python(corr_dcc, qmat):
"""Convert Q matrix to correlation matrix.
Parameters
----------
corr_dcc : (nobs, ndim, ndim) array
Correlation matrix
qmat : (nobs, ndim, ndim) array
Raw correlation matrix
"""
nobs, ndim = qmat.shape[:2]
for t in range(nobs):
qdiag = np.diag(qmat[t])**.5
if not (np.isfinite(qdiag).all() & (qdiag > 0).all()):
raise ValueError('Invalid diagonal of Q matrix!')
corr_dcc[t] = qmat[t] / (qdiag[:, np.newaxis] * qdiag)
corr_dcc[t][np.diag_indices(ndim)] = np.ones(ndim)
@jit(void(float64[:, :, :], float64[:, :, :]),
nopython=True, nogil=True, cache=True)
def corr_dcc_numba(corr_dcc, qmat):
"""Convert Q matrix to correlation matrix.
Parameters
----------
corr_dcc : (nobs, ndim, ndim) array
Correlation matrix
qmat : (nobs, ndim, ndim) array
Raw correlation matrix
"""
nobs, ndim = qmat.shape[:2]
for t in range(nobs):
for i in range(ndim):
for j in range(ndim):
cond1 = np.isfinite(qmat[t, i, j])
cond2 = qmat[t, i, i] > 0
cond3 = qmat[t, j, j] > 0
if not (cond1 & cond2 & cond3):
raise ValueError('Invalid diagonal of Q matrix!')
corr_dcc[t, i, j] = qmat[t, i, j] \
/ (qmat[t, i, i] * qmat[t, j, j])**.5
corr_dcc[t, i, i] = 1.
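# Minimal usage sketch (illustrative only; array shapes and parameter values are assumptions, not part of the API):
def _example_dcc_usage(nobs=10, ndim=2):
    """Run the Python DCC recursion on random innovations and return the correlation matrices."""
    data = np.random.standard_normal((nobs, ndim))
    neg_data = data * (data < 0)              # keep only negative innovations
    const = np.eye(ndim)
    qmat = np.zeros((nobs, ndim, ndim))
    qmat[0] = np.eye(ndim)
    param = np.array([.05, .9, .02])          # (acorr, bcorr, dcorr)
    dcc_recursion_python(qmat, const, data, neg_data, param)
    corr_dcc = np.zeros_like(qmat)
    corr_dcc_python(corr_dcc, qmat)
    return corr_dcc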
|
the-stack_0_20288 | import warnings
from pathlib import Path
from textwrap import dedent
import numpy
import pandas
from shapely.geometry import Point
import geopandas
from pygridtools import iotools
from pygridtools import misc
from pygridtools import validate
GEFDC_TEMPLATE = dedent("""\
C1 TITLE
C1 (LIMITED TO 80 CHARACTERS)
'{0}'
C2 INTEGER INPUT
C2 NTYPE NBPP IMIN IMAX JMIN JMAX IC JC
0 0 1 {1} 1 {2} {1} {2}
C3 GRAPHICS GRID INFORMATION
C3 ISGG IGM JGM DXCG DYCG NWTGG
0 0 0 0. 0. 1
C4 CARTESIAN AND GRAPHICS GRID COORDINATE DATA
C4 CDLON1 CDLON2 CDLON3 CDLAT1 CDLAT2 CDLAT3
0. 0. 0. 0. 0. 0.
C5 INTEGER INPUT
C5 ITRXM ITRHM ITRKM ITRGM NDEPSM NDEPSMF DEPMIN DDATADJ
200 200 200 200 4000 0 0 0
C6 REAL INPUT
C6 RPX RPK RPH RSQXM RSQKM RSQKIM RSQHM RSQHIM RSQHJM
1.8 1.8 1.8 1.E-12 1.E-12 1.E-12 1.E-12 1.E-12 1.E-12
C7 COORDINATE SHIFT PARAMETERS
C7 XSHIFT YSHIFT HSCALE RKJDKI ANGORO
0. 0. 1. 1. 5.0
C8 INTERPOLATION SWITCHES
C8 ISIRKI JSIRKI ISIHIHJ JSIHIHJ
1 0 0 0
C9 NTYPE = 7 SPECIFIED INPUT
C9 IB IE JB JE N7RLX NXYIT ITN7M IJSMD ISMD JSMD RP7 SERRMAX
C10 NTYPE = 7 SPECIFIED INPUT
C10 X Y IN ORDER (IB,JB) (IE,JB) (IE,JE) (IB,JE)
C11 DEPTH INTERPOLATION SWITCHES
C11 ISIDEP NDEPDAT CDEP RADM ISIDPTYP SURFELEV ISVEG NVEGDAT NVEGTYP
1 {3:d} 2 0.5 1 0.0 0 0 0
C12 LAST BOUNDARY POINT INFORMATION
C12 ILT JLT X(ILT,JLT) Y(ILT,JLT)
0 0 0.0 0.0
C13 I J X(I,J) Y(I,J)
""")
def _n_digits(num):
return int(numpy.floor(numpy.log10(num))) + 1
def write_cellinp(cell_array, outputfile='cell.inp', mode='w',
writeheader=True, rowlabels=True,
maxcols=125, flip=True):
"""Writes the cell.inp input file from an array of cell definitions.
Parameters
----------
cell_array : numpy array
Integer array of the values written to ``outfile``.
outputfile : optional string (default = "cell.inp")
Path *and* filename to the output file. Yes, you have to tell it
to call the file cell.inp
maxcols : optional int (default = 125)
Number of columns at which cell.inp should be wrapped. ``gefdc``
requires this to be 125.
flip : optional bool (default = True)
Numpy arrays have their origin in the upper left corner, so in
a sense south is up and north is down. This means that arrays
need to be flipped before writing to "cell.inp". Unless you are
_absolutely_sure_ that your array has been flipped already,
leave this parameter as True.
Returns
-------
None
See also
--------
make_gefdc_cells
"""
if flip:
cell_array = numpy.flipud(cell_array)
nrows, ncols = cell_array.shape
rowfmt = '{0:3d} {1:s}\n'
colfmt = f'{{:0{_n_digits(ncols)}d}}'
if cell_array.shape[1] > maxcols:
first_array = cell_array[:, :maxcols]
second_array = cell_array[:, maxcols:]
write_cellinp(first_array, outputfile=outputfile, mode=mode,
writeheader=writeheader, rowlabels=rowlabels,
maxcols=maxcols, flip=False)
write_cellinp(second_array, outputfile=outputfile, mode='a',
writeheader=False, rowlabels=False,
maxcols=maxcols, flip=False)
else:
columns = numpy.arange(1, maxcols + 1, dtype=int)
colstr = [list('{:04d}'.format(c)) for c in columns]
hundreds = ''.join([c[1] for c in colstr])
tens = ''.join([c[2] for c in colstr])
ones = ''.join([c[3] for c in colstr])
with Path(outputfile).open(mode) as outfile:
if writeheader:
title = 'C -- cell.inp for EFDC model by pygridtools\n'
outfile.write(title)
outfile.write('C {}\n'.format(hundreds[:ncols]))
outfile.write('C {}\n'.format(tens[:ncols]))
outfile.write('C {}\n'.format(ones[:ncols]))
for n, row in enumerate(cell_array):
row_number = nrows - n
row_strings = row.astype(str)
cell_text = ''.join(row_strings.tolist())
if rowlabels:
rowheader = ''
row_text = rowfmt.format(int(row_number), cell_text)
else:
row_text = ' {0:s}\n'.format(cell_text)
outfile.write(row_text)
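# Illustrative call (hypothetical array): a small 3x4 array of cell codes written to "cell.inp" in the working
# directory; 9 marks bank cells and 5 marks water cells.
#   write_cellinp(numpy.array([[9, 9, 9, 9],
#                              [9, 5, 5, 9],
#                              [9, 9, 9, 9]]), outputfile='cell.inp')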
def write_gefdc_control_file(outfile, title, max_i, max_j, bathyrows):
gefdc = GEFDC_TEMPLATE.format(title[:80], max_i, max_j, bathyrows)
with Path(outfile).open('w') as f:
f.write(gefdc)
return gefdc
def write_gridout_file(xcoords, ycoords, outfile):
xcoords, ycoords = validate.xy_array(xcoords, ycoords, as_pairs=False)
ny, nx = xcoords.shape
df = pandas.DataFrame({
'x': xcoords.flatten(),
'y': ycoords.flatten()
})
with Path(outfile).open('w') as f:
f.write('## {:d} x {:d}\n'.format(nx, ny))
# XXX: https://github.com/pandas-dev/pandas/issues/21882
with Path(outfile).open('a') as f:
df.to_csv(f, sep=' ', index=False, header=False,
na_rep='NaN', float_format='%.3f',
mode='a')
return df
def write_gridext_file(tidydf, outfile, icol='ii', jcol='jj',
xcol='easting', ycol='northing'):
# make sure cols are in the right order
df = tidydf[[icol, jcol, xcol, ycol]]
with Path(outfile).open('w') as f:
df.to_csv(f, sep=' ', index=False, header=False,
float_format=None)
return df
def convert_gridext_to_gis(inputfile, outputfile, crs=None, river='na', reach=0):
""" Converts gridext.inp from the rtools to a GIS file with
`geomtype = 'Point'`.
Parameters
----------
inputfile : string
Path and filename of the gridext.inp file
outputfile : string
Path and filename of the destination GIS file
crs : string, optional
A geopandas/proj/fiona-compatible string describing the coordinate
reference system of the x/y values.
river : optional string (default = None)
The river to be listed in the output file's attributes.
reach : optional int (default = 0)
The reach of the river to be listed in the output file's attributes.
Returns
-------
geopandas.GeoDataFrame
"""
errmsg = 'file {} not found'
    if not Path(inputfile).exists():
raise ValueError(errmsg.format(inputfile))
gdf = (
        pandas.read_csv(inputfile, sep=r'\s+', engine='python', header=None,
dtype={'ii': int, 'jj': int, 'x': float, 'y': float},
names=['ii', 'jj', 'x', 'y'])
.assign(id=lambda df: df.index)
.assign(ii_jj=lambda df:
df['ii'].astype(str).str.pad(3, fillchar='0') + '_' +
df['jj'].astype(str).str.pad(3, fillchar='0'))
.assign(elev=0.0, river=river, reach=reach)
.assign(geometry=lambda df: df.apply(lambda r: Point((r['x'], r['y'])), axis=1))
.drop(['x', 'y'], axis='columns')
.pipe(geopandas.GeoDataFrame, geometry='geometry', crs=crs)
)
gdf.to_file(outputfile)
return gdf
def make_gefdc_cells(node_mask, cell_mask=None, triangles=False):
""" Take an array defining the nodes as wet (1) or dry (0) create
the array of cell values needed for GEFDC.
Parameters
----------
node_mask : numpy bool array (N x M)
Bool array specifying if a *node* is present in the raw
(unmasked) grid.
cell_mask : optional numpy bool array (N-1 x M-1) or None (default)
Bool array specifying if a cell should be masked (e.g. due to
being an island or something like that).
triangles : optional bool (default = False)
        Currently not implemented. Will eventually enable the writing of
triangular cells when True.
Returns
-------
cell_array : numpy array
Integer array of the values written to ``outfile``.
"""
triangle_cells = {
0: 3,
1: 2,
3: 1,
2: 4,
}
land_cell = 0
water_cell = 5
bank_cell = 9
# I can't figure this out
if triangles:
warnings.warn('triangles are experimental')
# define the initial cells with everything labeled as a bank
ny, nx = cell_mask.shape
cells = numpy.zeros((ny + 2, nx + 2), dtype=int) + bank_cell
# loop through each *node*
for jj in range(1, ny + 1):
for ii in range(1, nx + 1):
# pull out the 4 nodes defining the cell (call it a quad)
quad = node_mask[jj - 1:jj + 1, ii - 1:ii + 1]
n_wet = quad.sum()
# anything that's masked is a "bank"
if not cell_mask[jj - 1, ii - 1]:
# if all 4 nodes are wet (=1), then the cell is 5
if n_wet == 4:
cells[jj, ii] = water_cell
# if only 3 are wet, might be a triangle, but...
                # this path is only taken when triangles=True (experimental; a warning was issued above)
elif n_wet == 3 and triangles:
dry_node = numpy.argmin(quad.flatten())
cells[jj, ii] = triangle_cells[dry_node]
# otherwise it's just a bank
else:
cells[jj, ii] = bank_cell
padded_cells = numpy.pad(cells, 1, mode='constant', constant_values=bank_cell)
for cj in range(cells.shape[0]):
for ci in range(cells.shape[1]):
shift = 3
total = numpy.sum(padded_cells[cj:cj + shift, ci:ci + shift])
if total == bank_cell * shift**2:
cells[cj, ci] = land_cell
nrows = cells.shape[0]
ncols = cells.shape[1]
# nchunks = numpy.ceil(ncols / maxcols)
# if ncols > maxcols:
# final_cells = numpy.zeros((nrows*nchunks, maxcols), dtype=int)
# for n in numpy.arange(nchunks):
# col_start = n * maxcols
# col_stop = (n+1) * maxcols
# row_start = n * nrows
# row_stop = (n+1) * nrows
# cells_to_move = cells[:, col_start:col_stop]
# final_cells[row_start:row_stop, 0:cells_to_move.shape[1]] = cells_to_move
# else:
# final_cells = cells.copy()
final_cells = cells.copy()
return final_cells
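# Illustrative sketch (hypothetical sizes): a fully wet 3x4 node mask with no masked cells yields a cell array whose
# interior cells are water (5) while the surrounding ring remains banks (9).
def _example_make_gefdc_cells():
    node_mask = numpy.ones((3, 4), dtype=bool)
    cell_mask = numpy.zeros((2, 3), dtype=bool)
    return make_gefdc_cells(node_mask, cell_mask)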
class GEFDCWriter:
"""
Convenience class to write the GEFDC files for a ModelGrid
Parameters
----------
mg : pygridtools.ModelGrid
directory : str or Path
Where all of the files will be saved
"""
def __init__(self, mg, directory):
self.mg = mg
self.directory = Path(directory)
def control_file(self, filename='gefdc.inp', bathyrows=0,
title=None):
"""
Generates the GEFDC control (gefdc.inp) file for the EFDC grid
preprocessor.
Parameters
----------
filename : str, optional
The name of the output file.
bathyrows : int, optional
The number of rows in the grid's bathymetry data file.
title : str, optional
The title of the grid as portrayed in ``filename``.
Returns
-------
gefdc : str
The text of the output file.
"""
if not title:
title = 'Model Grid from pygridtools'
outfile = self.directory / filename
gefdc = write_gefdc_control_file(
outfile,
title,
self.mg.inodes + 1,
self.mg.jnodes + 1,
bathyrows
)
return gefdc
def cell_file(self, filename='cell.inp', triangles=False,
maxcols=125):
"""
Generates the cell definition/ASCII-art file for GEFDC.
.. warning:
This whole thing is probably pretty buggy.
Parameters
----------
filename : str, optional
The name of the output file.
triangles : bool, optional
Toggles the inclusion of triangular cells.
.. warning:
This is experimental and probably buggy if it has been
                implemented at all.
maxcols : int, optional
The maximum number of columns to write to each row. Cells
            beyond this number will be written in a separate section at
the bottom of the file.
Returns
-------
cells : str
The text of the output file.
"""
cells = make_gefdc_cells(
~numpy.isnan(self.mg.xn),
self.mg.cell_mask,
triangles=triangles
)
outfile = self.directory / filename
write_cellinp(cells, outputfile=outfile, flip=True, maxcols=maxcols)
return cells
def gridout_file(self, filename='grid.out'):
"""
Writes to the nodes as coordinate pairs for GEFDC.
Parameters
----------
filename : str, optional
The name of the output file.
Returns
-------
df : pandas.DataFrame
The dataframe of node coordinate pairs.
"""
outfile = self.directory / filename
df = write_gridout_file(self.mg.xn, self.mg.yn, outfile)
return df
def gridext_file(self, filename='gridext.inp', shift=2):
"""
Writes to the nodes and I/J cell index as to a file for GEFDC.
Parameters
----------
filename : str, optional
The name of the output file.
shift : int, optional
The shift that should be applied to the I/J index. The
default value to 2 means that the first cell is at (2, 2)
instead of (0, 0).
Returns
-------
df : pandas.DataFrame
The dataframe of coordinates and I/J index.
"""
outfile = self.directory / filename
df = self.mg.to_dataframe().stack(level='ii', dropna=True).reset_index()
df['ii'] += shift
df['jj'] += shift
write_gridext_file(df, outfile)
return df
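# Minimal usage sketch (assumes an existing pygridtools.ModelGrid instance ``mg``; file names are the defaults):
def _example_write_all_gefdc_files(mg, directory='gefdc_output'):
    writer = GEFDCWriter(mg, directory)
    writer.control_file(title='Example grid')
    writer.cell_file()
    writer.gridout_file()
    writer.gridext_file()
    return writer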
|
the-stack_0_20289 | import pandas as pd
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
df = pd.read_csv(url, names=names)
array = df.values
X = array[:,0:8]
y = array[:,8]
seed = 21
num_trees = 100
max_features = 3
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)  # shuffle=True is required when random_state is set
model = RandomForestClassifier(n_estimators=num_trees, max_features=max_features)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print('results: ')
print(results)
print()
print('mean: ' + str(results.mean())) |
the-stack_0_20291 | # Copyright (c) 2010-2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift.common.swob import bytes_to_wsgi
from swift.common.utils import json, public
from swift.common.middleware.s3api.controllers.base import Controller
from swift.common.middleware.s3api.etree import Element, SubElement, tostring
from swift.common.middleware.s3api.s3response import HTTPOk, AccessDenied, \
NoSuchBucket
from swift.common.middleware.s3api.utils import validate_bucket_name
class ServiceController(Controller):
"""
Handles account level requests.
"""
@public
def GET(self, req):
"""
Handle GET Service request
"""
resp = req.get_response(self.app, query={'format': 'json'})
containers = json.loads(resp.body)
containers = filter(
lambda item: validate_bucket_name(
item['name'], self.conf.dns_compliant_bucket_names),
containers)
# we don't keep the creation time of a bucket (s3cmd doesn't
# work without that) so we use something bogus.
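        # The generated body has roughly this shape (one <Bucket> element per listed container):
        # <ListAllMyBucketsResult>
        #   <Owner><ID>user</ID><DisplayName>user</DisplayName></Owner>
        #   <Buckets>
        #     <Bucket><Name>bucket</Name><CreationDate>2009-02-03T16:45:09.000Z</CreationDate></Bucket>
        #   </Buckets>
        # </ListAllMyBucketsResult>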
elem = Element('ListAllMyBucketsResult')
owner = SubElement(elem, 'Owner')
SubElement(owner, 'ID').text = req.user_id
SubElement(owner, 'DisplayName').text = req.user_id
buckets = SubElement(elem, 'Buckets')
for c in containers:
if self.conf.s3_acl and self.conf.check_bucket_owner:
container = bytes_to_wsgi(c['name'].encode('utf8'))
try:
req.get_response(self.app, 'HEAD', container)
except AccessDenied:
continue
except NoSuchBucket:
continue
bucket = SubElement(buckets, 'Bucket')
SubElement(bucket, 'Name').text = c['name']
SubElement(bucket, 'CreationDate').text = \
'2009-02-03T16:45:09.000Z'
body = tostring(elem)
return HTTPOk(content_type='application/xml', body=body)
|
the-stack_0_20293 | ###
### Copyright (C) 2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ..util import *
from .vpp import VppTest
spec = load_test_spec("vpp", "mirroring")
class default(VppTest):
def before(self):
vars(self).update(
caps = platform.get_caps("vpp", "mirroring"),
metric = dict(type = "md5"),
vpp_op = "transpose",
)
super(default, self).before()
@slash.requires(*platform.have_caps("vpp", "mirroring"))
@slash.requires(*have_ffmpeg_filter("transpose_vaapi"))
@slash.parametrize(*gen_vpp_mirroring_parameters(spec))
def test(self, case, method):
vars(self).update(spec[case].copy())
vars(self).update(
case = case,
degrees = 0,
direction = map_transpose_direction(0, method),
method = method,
)
if self.direction is None:
slash.skip_test(
"{method} mirroring unsupported".format(**vars(self)))
self.vpp()
def check_metrics(self):
check_metric(**vars(self))
|
the-stack_0_20294 | """This is my file
This is the first python file that I have written.
"""
"""
print('Hello World')
if 1 == 2:
print('wut')
else:
print('ha')
x = 1
while x <= 5:
print(x)
print('haa')
x += 1
# other ways to increment # x
# x = x + 1
# x ++ does not work
"""
for thing in range(5):
print(thing)
for thing in 'hello':
print(thing)
my_list = [1,2,3,4,5]
for thing in my_list:
print(thing)
for val in range(15, 100):
print(
'{val} is divisible by 2: {dunno}'.format(
val=val, dunno=val%2 == 0))
def my_function():
# print('hello lunchtime')
return 'hello food'
print(my_function())
|
the-stack_0_20296 | from typing import Callable
from hyper_search.train_parameters import TrainParameters
from mydeep_keras.k_model import KModel
class KTrainContext(object):
def __init__(self,
model_provider: Callable[[], KModel],
params: TrainParameters,
augmentation) -> None:
super().__init__()
self.model_provider = model_provider
self.params = params
self.augmentation = augmentation
@property
def model(self):
return self.model_provider()
|
the-stack_0_20302 | import os
import numpy as np
import pickle
import shutil
import datetime
import cv2
import copyreg
from tqdm.std import tqdm
from . import compare_deepfeatures
from . import compare_compoelem_new
from . import compare_combined_vgg19
from . import compare_combined_sift
from . import compare_linkingArt
from . import compare_sift
from . import compare_orb
from . import compare_brief
# fix cv2 keypoint pickling error
def _pickle_keypoint(keypoint): # : cv2.KeyPoint
return cv2.KeyPoint, (
keypoint.pt[0],
keypoint.pt[1],
keypoint.size,
keypoint.angle,
keypoint.response,
keypoint.octave,
keypoint.class_id,
)
# Apply the bundling to pickle
copyreg.pickle(cv2.KeyPoint().__class__, _pickle_keypoint)
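# With the reducer registered above, pickle can round-trip cv2.KeyPoint instances (and therefore the cached
# datastore below) instead of failing with "cannot pickle 'cv2.KeyPoint' object".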
#dataset_cleaned_extended_balanced = ceb_dataset -> combination of clean_data (all with _art classes nativity and virgin) dataset and files from prathmesn & ronak from 18.03.
osuname = os.uname().nodename
print("osuname", osuname)
if osuname == 'MBP-von-Tilman' or osuname == 'MacBook-Pro-von-Tilman.local':
COMPOELEM_ROOT = "/Users/tilman/Documents/Programme/Python/new_bachelor_thesis/compoelem"
elif osuname == 'lme117':
COMPOELEM_ROOT = "/home/zi14teho/compositional_elements"
else:
COMPOELEM_ROOT = os.getenv('COMPOELEM_ROOT')
DATASTORE_NAME = "combined_datastore_ceb_dataset"
DATASTORE_FILE = COMPOELEM_ROOT+"/final_evaluation/"+DATASTORE_NAME+".pkl"
EVAL_RESULTS_FILE = COMPOELEM_ROOT+"/final_evaluation/evaluation_log.pkl"
datastore = pickle.load(open(DATASTORE_FILE, "rb"))
try:
evaluation_log = pickle.load(open(EVAL_RESULTS_FILE, "rb"))
# for log_entry in evaluation_log:
# log_entry["new"] = False
shutil.copyfile(EVAL_RESULTS_FILE, EVAL_RESULTS_FILE+"_"+str(datetime.date.today())+"_backup")
except FileNotFoundError as e:
evaluation_log = []
# [evaluation_log.append(experiment) for experiment in compare_deepfeatures.eval_all_combinations(datastore, DATASTORE_NAME, "imageNet_vgg19_bn_features")]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_deepfeatures.eval_all_combinations(datastore, DATASTORE_NAME, "places365_resnet50_feature_noFC")]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_compoelem.eval_all_combinations(datastore, DATASTORE_NAME)]
#fallback: yes, no
#filter_threshold: 150, 200, 250, 300
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 150, True)]
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 200, True)]
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 250, True)]
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 300, True)]
[evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 150, False)]
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 200, False)]
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 250, False)]
# [evaluation_log.append(experiment) for experiment in compare_compoelem_new.eval_all_combinations(datastore, DATASTORE_NAME, 300, False)]
# def eval_all_combinations(datastore, datastore_name, filter_threshold, with_fallback):
try:
evaluation_log = pickle.load(open(EVAL_RESULTS_FILE, "rb"))
pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
except Exception as e:
print("open err",e)
# [evaluation_log.append(experiment) for experiment in compare_combined_vgg19.eval_all_combinations(datastore, DATASTORE_NAME)]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_sift.eval_all_combinations(datastore, DATASTORE_NAME)]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_orb.eval_all_combinations(datastore, DATASTORE_NAME)]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_brief.eval_all_combinations(datastore, DATASTORE_NAME)]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_combined_sift.eval_all_combinations(datastore, DATASTORE_NAME)]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
# [evaluation_log.append(experiment) for experiment in compare_linkingArt.eval_all_combinations(datastore, DATASTORE_NAME)]
# pickle.dump(evaluation_log, open(EVAL_RESULTS_FILE, "wb"))
def get_new_evaluation_log():
evaluation_log = pickle.load(open(EVAL_RESULTS_FILE, "rb"))
new_log_entries = list(filter(lambda log_entry: log_entry["new"], evaluation_log))
return new_log_entries
print("new_log_entries: {}, evaluation_log_size:{}".format(len(get_new_evaluation_log()), len(evaluation_log)))
|
the-stack_0_20307 | """
Variation of Pyqtree
https://github.com/karimbahgat/Pyqtree
The MIT License (MIT)
Copyright (c) 2016 Karim Bahgat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__version__ = "0.30.0"
#PYTHON VERSION CHECK
import sys
PYTHON3 = int(sys.version[0]) == 3
if PYTHON3:
xrange = range
def _normalize_rect(rect):
x1, y1, x2, y2 = rect
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
return (x1, y1, x2, y2)
def _loopallchildren(parent):
for child in parent.children:
if child.children:
for subchild in _loopallchildren(child):
yield subchild
yield child
class _QuadNode(object):
def __init__(self, item, rect):
self.item = item
self.rect = rect
class _QuadTree(object):
"""
Internal backend version of the index.
The index being used behind the scenes. Has all the same methods as the user
index, but requires more technical arguments when initiating it than the
user-friendly version.
"""
def __init__(self, x, y, width, height, max_items, max_depth, _depth=0):
self.nodes = []
self.children = []
self.center = (x, y)
self.width, self.height = width, height
self.max_items = max_items
self.max_depth = max_depth
self._depth = _depth
def __iter__(self):
for child in _loopallchildren(self):
yield child
def _insert(self, item, bbox):
rect = _normalize_rect(bbox)
if len(self.children) == 0:
node = _QuadNode(item, rect)
self.nodes.append(node)
if len(self.nodes) > self.max_items and self._depth < self.max_depth:
self._split()
else:
self._insert_into_children(item, rect)
def _intersect(self, rect, results=None):
if results is None:
rect = _normalize_rect(rect)
results = set()
# search children
if self.children:
if rect[0] <= self.center[0]:
if rect[1] <= self.center[1]:
self.children[0]._intersect(rect, results)
if rect[3] >= self.center[1]:
self.children[1]._intersect(rect, results)
if rect[2] >= self.center[0]:
if rect[1] <= self.center[1]:
self.children[2]._intersect(rect, results)
if rect[3] >= self.center[1]:
self.children[3]._intersect(rect, results)
# search node at this level
for node in self.nodes:
if (node.rect[2] >= rect[0] and node.rect[0] <= rect[2] and
node.rect[3] >= rect[1] and node.rect[1] <= rect[3]):
results.add(node.item)
return results
def _insert_into_children(self, item, rect):
# if rect spans center then insert here
if (rect[0] <= self.center[0] and rect[2] >= self.center[0] and
rect[1] <= self.center[1] and rect[3] >= self.center[1]):
node = _QuadNode(item, rect)
self.nodes.append(node)
else:
# try to insert into children
if rect[0] <= self.center[0]:
if rect[1] <= self.center[1]:
self.children[0]._insert(item, rect)
if rect[3] >= self.center[1]:
self.children[1]._insert(item, rect)
if rect[2] > self.center[0]:
if rect[1] <= self.center[1]:
self.children[2]._insert(item, rect)
if rect[3] >= self.center[1]:
self.children[3]._insert(item, rect)
def _split(self):
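        # The four children are created in the order [(x1, y1), (x1, y2), (x2, y1), (x2, y2)], i.e. the same
        # quadrant order that _insert_into_children and _intersect rely on when routing rectangles.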
quartwidth = self.width / 4.0
quartheight = self.height / 4.0
halfwidth = self.width / 2.0
halfheight = self.height / 2.0
x1 = self.center[0] - quartwidth
x2 = self.center[0] + quartwidth
y1 = self.center[1] - quartheight
y2 = self.center[1] + quartheight
new_depth = self._depth + 1
self.children = [_QuadTree(x1, y1, halfwidth, halfheight,
self.max_items, self.max_depth, new_depth),
_QuadTree(x1, y2, halfwidth, halfheight,
self.max_items, self.max_depth, new_depth),
_QuadTree(x2, y1, halfwidth, halfheight,
self.max_items, self.max_depth, new_depth),
_QuadTree(x2, y2, halfwidth, halfheight,
self.max_items, self.max_depth, new_depth)]
nodes = self.nodes
self.nodes = []
for node in nodes:
self._insert_into_children(node.item, node.rect)
def __len__(self):
"""
Returns:
- A count of the total number of members/items/nodes inserted
into this quadtree and all of its child trees.
"""
size = 0
for child in self.children:
size += len(child)
size += len(self.nodes)
return size
MAX_ITEMS = 10
MAX_DEPTH = 20
class Index(_QuadTree):
"""
The top spatial index to be created by the user. Once created it can be
populated with geographically placed members that can later be tested for
intersection with a user inputted geographic bounding box. Note that the
index can be iterated through in a for-statement, which loops through all
all the quad instances and lets you access their properties.
Example usage:
>>> spindex = Index(bbox=(0, 0, 100, 100))
>>> spindex.insert('duck', (50, 30, 53, 60))
>>> spindex.insert('cookie', (10, 20, 15, 25))
>>> spindex.insert('python', (40, 50, 95, 90))
>>> results = spindex.intersect((51, 51, 86, 86))
>>> sorted(results)
['duck', 'python']
"""
def __init__(self, bbox=None, x=None, y=None, width=None, height=None, max_items=MAX_ITEMS, max_depth=MAX_DEPTH):
"""
Initiate by specifying either 1) a bbox to keep track of, or 2) with an xy centerpoint and a width and height.
Parameters:
- **bbox**: The coordinate system bounding box of the area that the quadtree should
keep track of, as a 4-length sequence (xmin,ymin,xmax,ymax)
- **x**:
The x center coordinate of the area that the quadtree should keep track of.
- **y**
The y center coordinate of the area that the quadtree should keep track of.
- **width**:
How far from the xcenter that the quadtree should look when keeping track.
- **height**:
How far from the ycenter that the quadtree should look when keeping track
- **max_items** (optional): The maximum number of items allowed per quad before splitting
up into four new subquads. Default is 10.
- **max_depth** (optional): The maximum levels of nested subquads, after which no more splitting
occurs and the bottommost quad nodes may grow indefinately. Default is 20.
"""
if bbox:
x1, y1, x2, y2 = bbox
width, height = abs(x2-x1), abs(y2-y1)
midx, midy = x1+width/2.0, y1+height/2.0
super(Index, self).__init__(midx, midy, width, height, max_items, max_depth)
elif all([x, y, width, height]):
super(Index, self).__init__(x, y, width, height, max_items, max_depth)
else:
raise Exception("Either the bbox argument must be set, or the x, y, width, and height arguments must be set")
def insert(self, item, bbox):
"""
Inserts an item into the quadtree along with its bounding box.
Parameters:
- **item**: The item to insert into the index, which will be returned by the intersection method
- **bbox**: The spatial bounding box tuple of the item, with four members (xmin,ymin,xmax,ymax)
"""
self._insert(item, bbox)
def intersect(self, bbox):
"""
Intersects an input boundingbox rectangle with all of the items
contained in the quadtree.
Parameters:
- **bbox**: A spatial bounding box tuple with four members (xmin,ymin,xmax,ymax)
Returns:
- A list of inserted items whose bounding boxes intersect with the input bbox.
"""
return list(self._intersect(bbox))
@property
def count(self):
"""returns the size of the index/number of nodes"""
        return len(self) |
the-stack_0_20308 | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import cv2
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmcv.ops.roi_align import roi_align
class BaseInstanceMasks(metaclass=ABCMeta):
"""Base class for instance masks."""
@abstractmethod
def rescale(self, scale, interpolation='nearest'):
"""Rescale masks as large as possible while keeping the aspect ratio.
For details can refer to `mmcv.imrescale`.
Args:
scale (tuple[int]): The maximum size (h, w) of rescaled mask.
interpolation (str): Same as :func:`mmcv.imrescale`.
Returns:
BaseInstanceMasks: The rescaled masks.
"""
@abstractmethod
def resize(self, out_shape, interpolation='nearest'):
"""Resize masks to the given out_shape.
Args:
out_shape: Target (h, w) of resized mask.
interpolation (str): See :func:`mmcv.imresize`.
Returns:
BaseInstanceMasks: The resized masks.
"""
@abstractmethod
def flip(self, flip_direction='horizontal'):
"""Flip masks alone the given direction.
Args:
flip_direction (str): Either 'horizontal' or 'vertical'.
Returns:
BaseInstanceMasks: The flipped masks.
"""
@abstractmethod
def pad(self, out_shape, pad_val):
"""Pad masks to the given size of (h, w).
Args:
out_shape (tuple[int]): Target (h, w) of padded mask.
pad_val (int): The padded value.
Returns:
BaseInstanceMasks: The padded masks.
"""
@abstractmethod
def crop(self, bbox):
"""Crop each mask by the given bbox.
Args:
bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ).
Return:
BaseInstanceMasks: The cropped masks.
"""
@abstractmethod
def crop_and_resize(self,
bboxes,
out_shape,
inds,
device,
interpolation='bilinear',
binarize=True):
"""Crop and resize masks by the given bboxes.
This function is mainly used in mask targets computation.
        It first aligns masks to bboxes by assigned_inds, then crops each mask by its
        assigned bbox and resizes it to the size of (mask_h, mask_w)
Args:
bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)
out_shape (tuple[int]): Target (h, w) of resized mask
inds (ndarray): Indexes to assign masks to each bbox,
shape (N,) and values should be between [0, num_masks - 1].
device (str): Device of bboxes
interpolation (str): See `mmcv.imresize`
binarize (bool): if True fractional values are rounded to 0 or 1
after the resize operation. if False and unsupported an error
will be raised. Defaults to True.
Return:
BaseInstanceMasks: the cropped and resized masks.
"""
@abstractmethod
def expand(self, expanded_h, expanded_w, top, left):
"""see :class:`Expand`."""
@property
@abstractmethod
def areas(self):
"""ndarray: areas of each instance."""
@abstractmethod
def to_ndarray(self):
"""Convert masks to the format of ndarray.
Return:
ndarray: Converted masks in the format of ndarray.
"""
@abstractmethod
def to_tensor(self, dtype, device):
"""Convert masks to the format of Tensor.
Args:
dtype (str): Dtype of converted mask.
device (torch.device): Device of converted masks.
Returns:
Tensor: Converted masks in the format of Tensor.
"""
@abstractmethod
def translate(self,
out_shape,
offset,
direction='horizontal',
fill_val=0,
interpolation='bilinear'):
"""Translate the masks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
offset (int | float): The offset for translate.
direction (str): The translate direction, either "horizontal"
or "vertical".
fill_val (int | float): Border value. Default 0.
interpolation (str): Same as :func:`mmcv.imtranslate`.
Returns:
Translated masks.
"""
def shear(self,
out_shape,
magnitude,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""Shear the masks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
magnitude (int | float): The magnitude used for shear.
direction (str): The shear direction, either "horizontal"
or "vertical".
border_value (int | tuple[int]): Value used in case of a
constant border. Default 0.
interpolation (str): Same as in :func:`mmcv.imshear`.
Returns:
ndarray: Sheared masks.
"""
@abstractmethod
def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
"""Rotate the masks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
angle (int | float): Rotation angle in degrees. Positive values
mean counter-clockwise rotation.
center (tuple[float], optional): Center point (w, h) of the
rotation in source image. If not specified, the center of
the image will be used.
scale (int | float): Isotropic scale factor.
fill_val (int | float): Border value. Default 0 for masks.
Returns:
Rotated masks.
"""
class BitmapMasks(BaseInstanceMasks):
"""This class represents masks in the form of bitmaps.
Args:
masks (ndarray): ndarray of masks in shape (N, H, W), where N is
the number of objects.
height (int): height of masks
width (int): width of masks
Example:
>>> from mmdet.core.mask.structures import * # NOQA
>>> num_masks, H, W = 3, 32, 32
>>> rng = np.random.RandomState(0)
>>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int)
>>> self = BitmapMasks(masks, height=H, width=W)
>>> # demo crop_and_resize
>>> num_boxes = 5
>>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)
>>> out_shape = (14, 14)
>>> inds = torch.randint(0, len(self), size=(num_boxes,))
>>> device = 'cpu'
>>> interpolation = 'bilinear'
>>> new = self.crop_and_resize(
... bboxes, out_shape, inds, device, interpolation)
>>> assert len(new) == num_boxes
>>> assert new.height, new.width == out_shape
"""
def __init__(self, masks, height, width):
self.height = height
self.width = width
if len(masks) == 0:
self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)
else:
assert isinstance(masks, (list, np.ndarray))
if isinstance(masks, list):
assert isinstance(masks[0], np.ndarray)
assert masks[0].ndim == 2 # (H, W)
else:
assert masks.ndim == 3 # (N, H, W)
self.masks = np.stack(masks).reshape(-1, height, width)
assert self.masks.shape[1] == self.height
assert self.masks.shape[2] == self.width
def __getitem__(self, index):
"""Index the BitmapMask.
Args:
index (int | ndarray): Indices in the format of integer or ndarray.
Returns:
:obj:`BitmapMasks`: Indexed bitmap masks.
"""
masks = self.masks[index].reshape(-1, self.height, self.width)
return BitmapMasks(masks, self.height, self.width)
def __iter__(self):
return iter(self.masks)
def __repr__(self):
s = self.__class__.__name__ + '('
s += f'num_masks={len(self.masks)}, '
s += f'height={self.height}, '
s += f'width={self.width})'
return s
def __len__(self):
"""Number of masks."""
return len(self.masks)
def rescale(self, scale, interpolation='nearest'):
"""See :func:`BaseInstanceMasks.rescale`."""
if len(self.masks) == 0:
new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)
else:
rescaled_masks = np.stack([
mmcv.imrescale(mask, scale, interpolation=interpolation)
for mask in self.masks
])
height, width = rescaled_masks.shape[1:]
return BitmapMasks(rescaled_masks, height, width)
def resize(self, out_shape, interpolation='nearest'):
"""See :func:`BaseInstanceMasks.resize`."""
if len(self.masks) == 0:
resized_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
resized_masks = np.stack([
mmcv.imresize(
mask, out_shape[::-1], interpolation=interpolation)
for mask in self.masks
])
return BitmapMasks(resized_masks, *out_shape)
def flip(self, flip_direction='horizontal'):
"""See :func:`BaseInstanceMasks.flip`."""
assert flip_direction in ('horizontal', 'vertical', 'diagonal')
if len(self.masks) == 0:
flipped_masks = self.masks
else:
flipped_masks = np.stack([
mmcv.imflip(mask, direction=flip_direction)
for mask in self.masks
])
return BitmapMasks(flipped_masks, self.height, self.width)
def pad(self, out_shape, pad_val=0):
"""See :func:`BaseInstanceMasks.pad`."""
if len(self.masks) == 0:
padded_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
padded_masks = np.stack([
mmcv.impad(mask, shape=out_shape, pad_val=pad_val)
for mask in self.masks
])
return BitmapMasks(padded_masks, *out_shape)
def crop(self, bbox):
"""See :func:`BaseInstanceMasks.crop`."""
assert isinstance(bbox, np.ndarray)
assert bbox.ndim == 1
# clip the boundary
bbox = bbox.copy()
bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1, 1)
h = np.maximum(y2 - y1, 1)
if len(self.masks) == 0:
cropped_masks = np.empty((0, h, w), dtype=np.uint8)
else:
cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
return BitmapMasks(cropped_masks, h, w)
def crop_and_resize(self,
bboxes,
out_shape,
inds,
device='cpu',
interpolation='bilinear',
binarize=True):
"""See :func:`BaseInstanceMasks.crop_and_resize`."""
if len(self.masks) == 0:
empty_masks = np.empty((0, *out_shape), dtype=np.uint8)
return BitmapMasks(empty_masks, *out_shape)
# convert bboxes to tensor
if isinstance(bboxes, np.ndarray):
bboxes = torch.from_numpy(bboxes).to(device=device)
if isinstance(inds, np.ndarray):
inds = torch.from_numpy(inds).to(device=device)
num_bbox = bboxes.shape[0]
fake_inds = torch.arange(
num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]
rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5
rois = rois.to(device=device)
if num_bbox > 0:
gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(
0, inds).to(dtype=rois.dtype)
targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,
1.0, 0, 'avg', True).squeeze(1)
if binarize:
resized_masks = (targets >= 0.5).cpu().numpy()
else:
resized_masks = targets.cpu().numpy()
else:
resized_masks = []
return BitmapMasks(resized_masks, *out_shape)
def expand(self, expanded_h, expanded_w, top, left):
"""See :func:`BaseInstanceMasks.expand`."""
if len(self.masks) == 0:
expanded_mask = np.empty((0, expanded_h, expanded_w),
dtype=np.uint8)
else:
expanded_mask = np.zeros((len(self), expanded_h, expanded_w),
dtype=np.uint8)
expanded_mask[:, top:top + self.height,
left:left + self.width] = self.masks
return BitmapMasks(expanded_mask, expanded_h, expanded_w)
def translate(self,
out_shape,
offset,
direction='horizontal',
fill_val=0,
interpolation='bilinear'):
"""Translate the BitmapMasks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
offset (int | float): The offset for translate.
direction (str): The translate direction, either "horizontal"
or "vertical".
fill_val (int | float): Border value. Default 0 for masks.
interpolation (str): Same as :func:`mmcv.imtranslate`.
Returns:
BitmapMasks: Translated BitmapMasks.
Example:
>>> from mmdet.core.mask.structures import BitmapMasks
>>> self = BitmapMasks.random(dtype=np.uint8)
>>> out_shape = (32, 32)
>>> offset = 4
>>> direction = 'horizontal'
>>> fill_val = 0
>>> interpolation = 'bilinear'
>>> # Note, There seem to be issues when:
>>> # * out_shape is different than self's shape
>>> # * the mask dtype is not supported by cv2.AffineWarp
>>> new = self.translate(out_shape, offset, direction, fill_val,
>>> interpolation)
>>> assert len(new) == len(self)
>>> assert new.height, new.width == out_shape
"""
if len(self.masks) == 0:
translated_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
translated_masks = mmcv.imtranslate(
self.masks.transpose((1, 2, 0)),
offset,
direction,
border_value=fill_val,
interpolation=interpolation)
if translated_masks.ndim == 2:
translated_masks = translated_masks[:, :, None]
translated_masks = translated_masks.transpose(
(2, 0, 1)).astype(self.masks.dtype)
return BitmapMasks(translated_masks, *out_shape)
def shear(self,
out_shape,
magnitude,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""Shear the BitmapMasks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
magnitude (int | float): The magnitude used for shear.
direction (str): The shear direction, either "horizontal"
or "vertical".
border_value (int | tuple[int]): Value used in case of a
constant border.
interpolation (str): Same as in :func:`mmcv.imshear`.
Returns:
BitmapMasks: The sheared masks.
"""
if len(self.masks) == 0:
sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
sheared_masks = mmcv.imshear(
self.masks.transpose((1, 2, 0)),
magnitude,
direction,
border_value=border_value,
interpolation=interpolation)
if sheared_masks.ndim == 2:
sheared_masks = sheared_masks[:, :, None]
sheared_masks = sheared_masks.transpose(
(2, 0, 1)).astype(self.masks.dtype)
return BitmapMasks(sheared_masks, *out_shape)
def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
"""Rotate the BitmapMasks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
angle (int | float): Rotation angle in degrees. Positive values
mean counter-clockwise rotation.
center (tuple[float], optional): Center point (w, h) of the
rotation in source image. If not specified, the center of
the image will be used.
scale (int | float): Isotropic scale factor.
fill_val (int | float): Border value. Default 0 for masks.
Returns:
BitmapMasks: Rotated BitmapMasks.
"""
if len(self.masks) == 0:
rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype)
else:
rotated_masks = mmcv.imrotate(
self.masks.transpose((1, 2, 0)),
angle,
center=center,
scale=scale,
border_value=fill_val)
if rotated_masks.ndim == 2:
# case when only one mask, (h, w)
rotated_masks = rotated_masks[:, :, None] # (h, w, 1)
rotated_masks = rotated_masks.transpose(
(2, 0, 1)).astype(self.masks.dtype)
return BitmapMasks(rotated_masks, *out_shape)
@property
def areas(self):
"""See :py:attr:`BaseInstanceMasks.areas`."""
return self.masks.sum((1, 2))
def to_ndarray(self):
"""See :func:`BaseInstanceMasks.to_ndarray`."""
return self.masks
def to_tensor(self, dtype, device):
"""See :func:`BaseInstanceMasks.to_tensor`."""
return torch.tensor(self.masks, dtype=dtype, device=device)
@classmethod
def random(cls,
num_masks=3,
height=32,
width=32,
dtype=np.uint8,
rng=None):
"""Generate random bitmap masks for demo / testing purposes.
Example:
>>> from mmdet.core.mask.structures import BitmapMasks
>>> self = BitmapMasks.random()
>>> print('self = {}'.format(self))
self = BitmapMasks(num_masks=3, height=32, width=32)
"""
from mmdet.utils.util_random import ensure_rng
rng = ensure_rng(rng)
masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype)
self = cls(masks, height=height, width=width)
return self
class PolygonMasks(BaseInstanceMasks):
"""This class represents masks in the form of polygons.
Polygons is a list of three levels. The first level of the list
corresponds to objects, the second level to the polys that compose the
object, the third level to the poly coordinates
Args:
masks (list[list[ndarray]]): The first level of the list
corresponds to objects, the second level to the polys that
compose the object, the third level to the poly coordinates
height (int): height of masks
width (int): width of masks
Example:
>>> from mmdet.core.mask.structures import * # NOQA
>>> masks = [
>>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ]
>>> ]
>>> height, width = 16, 16
>>> self = PolygonMasks(masks, height, width)
>>> # demo translate
>>> new = self.translate((16, 16), 4., direction='horizontal')
>>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2])
>>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4)
>>> # demo crop_and_resize
>>> num_boxes = 3
>>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)
>>> out_shape = (16, 16)
>>> inds = torch.randint(0, len(self), size=(num_boxes,))
>>> device = 'cpu'
>>> interpolation = 'bilinear'
>>> new = self.crop_and_resize(
... bboxes, out_shape, inds, device, interpolation)
>>> assert len(new) == num_boxes
>>> assert new.height, new.width == out_shape
"""
def __init__(self, masks, height, width):
assert isinstance(masks, list)
if len(masks) > 0:
assert isinstance(masks[0], list)
assert isinstance(masks[0][0], np.ndarray)
self.height = height
self.width = width
self.masks = masks
def __getitem__(self, index):
"""Index the polygon masks.
Args:
index (ndarray | List): The indices.
Returns:
:obj:`PolygonMasks`: The indexed polygon masks.
"""
if isinstance(index, np.ndarray):
index = index.tolist()
if isinstance(index, list):
masks = [self.masks[i] for i in index]
else:
try:
masks = self.masks[index]
except Exception:
raise ValueError(
f'Unsupported input of type {type(index)} for indexing!')
if len(masks) and isinstance(masks[0], np.ndarray):
masks = [masks] # ensure a list of three levels
return PolygonMasks(masks, self.height, self.width)
def __iter__(self):
return iter(self.masks)
def __repr__(self):
s = self.__class__.__name__ + '('
s += f'num_masks={len(self.masks)}, '
s += f'height={self.height}, '
s += f'width={self.width})'
return s
def __len__(self):
"""Number of masks."""
return len(self.masks)
def rescale(self, scale, interpolation=None):
"""see :func:`BaseInstanceMasks.rescale`"""
new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
if len(self.masks) == 0:
rescaled_masks = PolygonMasks([], new_h, new_w)
else:
rescaled_masks = self.resize((new_h, new_w))
return rescaled_masks
def resize(self, out_shape, interpolation=None):
"""see :func:`BaseInstanceMasks.resize`"""
if len(self.masks) == 0:
resized_masks = PolygonMasks([], *out_shape)
else:
h_scale = out_shape[0] / self.height
w_scale = out_shape[1] / self.width
resized_masks = []
for poly_per_obj in self.masks:
resized_poly = []
for p in poly_per_obj:
p = p.copy()
p[0::2] = p[0::2] * w_scale
p[1::2] = p[1::2] * h_scale
resized_poly.append(p)
resized_masks.append(resized_poly)
resized_masks = PolygonMasks(resized_masks, *out_shape)
return resized_masks
def flip(self, flip_direction='horizontal'):
"""see :func:`BaseInstanceMasks.flip`"""
assert flip_direction in ('horizontal', 'vertical', 'diagonal')
if len(self.masks) == 0:
flipped_masks = PolygonMasks([], self.height, self.width)
else:
flipped_masks = []
for poly_per_obj in self.masks:
flipped_poly_per_obj = []
for p in poly_per_obj:
p = p.copy()
if flip_direction == 'horizontal':
p[0::2] = self.width - p[0::2]
elif flip_direction == 'vertical':
p[1::2] = self.height - p[1::2]
else:
p[0::2] = self.width - p[0::2]
p[1::2] = self.height - p[1::2]
flipped_poly_per_obj.append(p)
flipped_masks.append(flipped_poly_per_obj)
flipped_masks = PolygonMasks(flipped_masks, self.height,
self.width)
return flipped_masks
def crop(self, bbox):
"""see :func:`BaseInstanceMasks.crop`"""
assert isinstance(bbox, np.ndarray)
assert bbox.ndim == 1
# clip the boundary
bbox = bbox.copy()
bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1, 1)
h = np.maximum(y2 - y1, 1)
if len(self.masks) == 0:
cropped_masks = PolygonMasks([], h, w)
else:
cropped_masks = []
for poly_per_obj in self.masks:
cropped_poly_per_obj = []
for p in poly_per_obj:
# pycocotools will clip the boundary
p = p.copy()
p[0::2] = p[0::2] - bbox[0]
p[1::2] = p[1::2] - bbox[1]
cropped_poly_per_obj.append(p)
cropped_masks.append(cropped_poly_per_obj)
cropped_masks = PolygonMasks(cropped_masks, h, w)
return cropped_masks
def pad(self, out_shape, pad_val=0):
"""padding has no effect on polygons`"""
return PolygonMasks(self.masks, *out_shape)
def expand(self, *args, **kwargs):
"""TODO: Add expand for polygon"""
raise NotImplementedError
def crop_and_resize(self,
bboxes,
out_shape,
inds,
device='cpu',
interpolation='bilinear',
binarize=True):
"""see :func:`BaseInstanceMasks.crop_and_resize`"""
out_h, out_w = out_shape
if len(self.masks) == 0:
return PolygonMasks([], out_h, out_w)
if not binarize:
raise ValueError('Polygons are always binary, '
'setting binarize=False is unsupported')
resized_masks = []
for i in range(len(bboxes)):
mask = self.masks[inds[i]]
bbox = bboxes[i, :]
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1, 1)
h = np.maximum(y2 - y1, 1)
h_scale = out_h / max(h, 0.1) # avoid too large scale
w_scale = out_w / max(w, 0.1)
resized_mask = []
for p in mask:
p = p.copy()
# crop
# pycocotools will clip the boundary
p[0::2] = p[0::2] - bbox[0]
p[1::2] = p[1::2] - bbox[1]
# resize
p[0::2] = p[0::2] * w_scale
p[1::2] = p[1::2] * h_scale
resized_mask.append(p)
resized_masks.append(resized_mask)
return PolygonMasks(resized_masks, *out_shape)
def translate(self,
out_shape,
offset,
direction='horizontal',
fill_val=None,
interpolation=None):
"""Translate the PolygonMasks.
Example:
>>> self = PolygonMasks.random(dtype=np.int)
>>> out_shape = (self.height, self.width)
>>> new = self.translate(out_shape, 4., direction='horizontal')
>>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2])
>>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501
"""
assert fill_val is None or fill_val == 0, 'Here fill_val is not '\
            f'used, and by default should be None or 0. got {fill_val}.'
if len(self.masks) == 0:
translated_masks = PolygonMasks([], *out_shape)
else:
translated_masks = []
for poly_per_obj in self.masks:
translated_poly_per_obj = []
for p in poly_per_obj:
p = p.copy()
if direction == 'horizontal':
p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])
elif direction == 'vertical':
p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])
translated_poly_per_obj.append(p)
translated_masks.append(translated_poly_per_obj)
translated_masks = PolygonMasks(translated_masks, *out_shape)
return translated_masks
def shear(self,
out_shape,
magnitude,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""See :func:`BaseInstanceMasks.shear`."""
if len(self.masks) == 0:
sheared_masks = PolygonMasks([], *out_shape)
else:
sheared_masks = []
if direction == 'horizontal':
shear_matrix = np.stack([[1, magnitude],
[0, 1]]).astype(np.float32)
elif direction == 'vertical':
shear_matrix = np.stack([[1, 0], [magnitude,
1]]).astype(np.float32)
for poly_per_obj in self.masks:
sheared_poly = []
for p in poly_per_obj:
p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n]
new_coords = np.matmul(shear_matrix, p) # [2, n]
new_coords[0, :] = np.clip(new_coords[0, :], 0,
out_shape[1])
new_coords[1, :] = np.clip(new_coords[1, :], 0,
out_shape[0])
sheared_poly.append(
new_coords.transpose((1, 0)).reshape(-1))
sheared_masks.append(sheared_poly)
sheared_masks = PolygonMasks(sheared_masks, *out_shape)
return sheared_masks
def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
"""See :func:`BaseInstanceMasks.rotate`."""
if len(self.masks) == 0:
rotated_masks = PolygonMasks([], *out_shape)
else:
rotated_masks = []
rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)
for poly_per_obj in self.masks:
rotated_poly = []
for p in poly_per_obj:
p = p.copy()
coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2]
# pad 1 to convert from format [x, y] to homogeneous
# coordinates format [x, y, 1]
coords = np.concatenate(
(coords, np.ones((coords.shape[0], 1), coords.dtype)),
axis=1) # [n, 3]
rotated_coords = np.matmul(
rotate_matrix[None, :, :],
coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2]
rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,
out_shape[1])
rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,
out_shape[0])
rotated_poly.append(rotated_coords.reshape(-1))
rotated_masks.append(rotated_poly)
rotated_masks = PolygonMasks(rotated_masks, *out_shape)
return rotated_masks
def to_bitmap(self):
"""convert polygon masks to bitmap masks."""
bitmap_masks = self.to_ndarray()
return BitmapMasks(bitmap_masks, self.height, self.width)
@property
def areas(self):
"""Compute areas of masks.
This func is modified from `detectron2
<https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_.
The function only works with Polygons using the shoelace formula.
Return:
ndarray: areas of each instance
""" # noqa: W501
area = []
for polygons_per_obj in self.masks:
area_per_obj = 0
for p in polygons_per_obj:
area_per_obj += self._polygon_area(p[0::2], p[1::2])
area.append(area_per_obj)
return np.asarray(area)
def _polygon_area(self, x, y):
"""Compute the area of a component of a polygon.
Using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Args:
x (ndarray): x coordinates of the component
y (ndarray): y coordinates of the component
Return:
            float: the area of the component
        """ # noqa: E501
return 0.5 * np.abs(
np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
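    # Worked example (added for clarity, not part of the original code): for a
    # unit square with x = [0, 1, 1, 0] and y = [0, 0, 1, 1], the shoelace
    # terms are dot(x, roll(y, 1)) = 0 and dot(y, roll(x, 1)) = 2, so the
    # area is 0.5 * |0 - 2| = 1.0, as expected.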
def to_ndarray(self):
"""Convert masks to the format of ndarray."""
if len(self.masks) == 0:
return np.empty((0, self.height, self.width), dtype=np.uint8)
bitmap_masks = []
for poly_per_obj in self.masks:
bitmap_masks.append(
polygon_to_bitmap(poly_per_obj, self.height, self.width))
return np.stack(bitmap_masks)
def to_tensor(self, dtype, device):
"""See :func:`BaseInstanceMasks.to_tensor`."""
if len(self.masks) == 0:
return torch.empty((0, self.height, self.width),
dtype=dtype,
device=device)
ndarray_masks = self.to_ndarray()
return torch.tensor(ndarray_masks, dtype=dtype, device=device)
@classmethod
def random(cls,
num_masks=3,
height=32,
width=32,
n_verts=5,
dtype=np.float32,
rng=None):
"""Generate random polygon masks for demo / testing purposes.
Adapted from [1]_
References:
.. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501
Example:
>>> from mmdet.core.mask.structures import PolygonMasks
>>> self = PolygonMasks.random()
>>> print('self = {}'.format(self))
"""
from mmdet.utils.util_random import ensure_rng
rng = ensure_rng(rng)
def _gen_polygon(n, irregularity, spikeyness):
"""Creates the polygon by sampling points on a circle around the
centre. Random noise is added by varying the angular spacing
between sequential points, and by varying the radial distance of
each point from the centre.
Based on original code by Mike Ounsworth
Args:
n (int): number of vertices
irregularity (float): [0,1] indicating how much variance there
is in the angular spacing of vertices. [0,1] will map to
[0, 2pi/numberOfVerts]
spikeyness (float): [0,1] indicating how much variance there is
in each vertex from the circle of radius aveRadius. [0,1]
will map to [0, aveRadius]
Returns:
a list of vertices, in CCW order.
"""
from scipy.stats import truncnorm
# Generate around the unit circle
cx, cy = (0.0, 0.0)
radius = 1
tau = np.pi * 2
irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n
spikeyness = np.clip(spikeyness, 1e-9, 1)
# generate n angle steps
lower = (tau / n) - irregularity
upper = (tau / n) + irregularity
angle_steps = rng.uniform(lower, upper, n)
# normalize the steps so that point 0 and point n+1 are the same
k = angle_steps.sum() / (2 * np.pi)
angles = (angle_steps / k).cumsum() + rng.uniform(0, tau)
# Convert high and low values to be wrt the standard normal range
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html
low = 0
high = 2 * radius
mean = radius
std = spikeyness
a = (low - mean) / std
b = (high - mean) / std
tnorm = truncnorm(a=a, b=b, loc=mean, scale=std)
# now generate the points
radii = tnorm.rvs(n, random_state=rng)
x_pts = cx + radii * np.cos(angles)
y_pts = cy + radii * np.sin(angles)
points = np.hstack([x_pts[:, None], y_pts[:, None]])
# Scale to 0-1 space
points = points - points.min(axis=0)
points = points / points.max(axis=0)
# Randomly place within 0-1 space
points = points * (rng.rand() * .8 + .2)
min_pt = points.min(axis=0)
max_pt = points.max(axis=0)
high = (1 - max_pt)
low = (0 - min_pt)
offset = (rng.rand(2) * (high - low)) + low
points = points + offset
return points
def _order_vertices(verts):
"""
References:
https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise
"""
mlat = verts.T[0].sum() / len(verts)
mlng = verts.T[1].sum() / len(verts)
tau = np.pi * 2
angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) +
tau) % tau
sortx = angle.argsort()
verts = verts.take(sortx, axis=0)
return verts
# Generate a random exterior for each requested mask
masks = []
for _ in range(num_masks):
exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9))
exterior = (exterior * [(width, height)]).astype(dtype)
masks.append([exterior.ravel()])
self = cls(masks, height, width)
return self
def polygon_to_bitmap(polygons, height, width):
"""Convert masks from the form of polygons to bitmaps.
Args:
polygons (list[ndarray]): masks in polygon representation
height (int): mask height
width (int): mask width
Return:
ndarray: the converted masks in bitmap representation
"""
rles = maskUtils.frPyObjects(polygons, height, width)
rle = maskUtils.merge(rles)
    bitmap_mask = maskUtils.decode(rle).astype(bool)
return bitmap_mask
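# Illustrative usage sketch (added for clarity, not part of the original
# module). It assumes this file is importable as part of mmdet, with numpy,
# scipy and pycocotools available, exactly as the code above requires.
if __name__ == '__main__':
    # Generate two random polygon masks and exercise the conversions above.
    demo = PolygonMasks.random(num_masks=2, height=32, width=32)
    print('shoelace areas per instance:', demo.areas)
    bitmaps = demo.to_ndarray()  # one boolean bitmap per instance, shape (2, 32, 32)
    print('bitmap mask shape:', bitmaps.shape)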
|
the-stack_0_20310 | from __future__ import print_function
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from googleapiclient import errors
from .models import Profile
import string
from datetime import datetime
import requests
from requests import exceptions as requests_errors
from google.auth.exceptions import RefreshError
from .social_auth_credentials import Credentials
#from google.oauth2.credentials import Credentials
from social_django.utils import load_strategy
from .models import JobApplication
from .models import ApplicationStatus
from .models import JobPostDetail
import base64
import time
from .gmail_utils import convertTime
from .gmail_utils import removeHtmlTags
from .gmail_utils import find_nth
from .linkedin_utils import parse_job_detail
def get_email_detail(service, user_id, msg_id, user, source):
"""Get a Message with given ID.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
msg_id: The ID of the Message required.
Returns:
A Message.
"""
try:
custom_image_url = '/static/images/JobHax-logo-black.svg'
message = service.users().messages().get(userId=user_id, id=msg_id, format='full').execute()
jobTitle = ''
company = ''
image_url = ''
for header in message['payload']['headers']:
if header['name'] == 'Subject':
subject = str(header['value'])
if(source == 'LinkedIn'):
jobTitle = subject[subject.index('for ') + 4 : subject.index(' at ')]
company = subject[subject.index('at ') + 3:]
elif(source == 'Hired.com'):
jobTitle = subject[subject.index('st: ') + 4 : subject.index(' at ')]
company = subject[subject.index('at ') + 3 : subject.index('(')]
elif(source == 'Indeed'):
jobTitle = subject[subject.index('Indeed Application: ') + 20 : ]
elif header['name'] == 'Date':
date = header['value']
date = convertTime(str(date))
try:
for part in message['payload']['parts']:
if(part['mimeType'] == 'text/html'):
#get mail's body as a string
body = str(base64.urlsafe_b64decode(part['body']['data'].encode('ASCII')))
if(source == 'LinkedIn'):
posterInformationJSON, decoratedJobPostingJSON, topCardV2JSON = parse_job_detail(body)
s = find_nth(body, 'https://media.licdn.com', 2)
if(s != -1):
e = find_nth(body, '" alt="' + company + '"', 1)
image_url = body[s : e].replace('&', '&')
image_exists=requests.get(image_url)
if(image_exists.status_code == 404):
image_url = custom_image_url
else:
image_url = custom_image_url
if len(image_url) > 300:
image_url = custom_image_url
elif(source == 'Vettery'):
jobTitle = body[body.index('Role: ') + 6 : body.index('Salary')]
jobTitle = removeHtmlTags(jobTitle)
company = body[body.index('interview with ') + 15 : body.index('. Interested?')]
image_url = custom_image_url
elif(source == 'Indeed'):
company = body[body.index('Get job updates from <b>') + 24 : body.index('</b>.<br><i>By selecting')]
image_url = custom_image_url
elif(source == 'Hired.com'):
image_url = custom_image_url
except Exception as e:
print(e)
if user.is_authenticated:
inserted_before = JobApplication.objects.all().filter(msgId=msg_id)
print(image_url)
if not inserted_before and jobTitle != '' and company != '':
status = ApplicationStatus.objects.all().get(value='N/A')
japp = JobApplication(jobTitle=jobTitle, company=company, applyDate=date, msgId=msg_id, source = source, user = user, companyLogo = image_url, applicationStatus = status)
japp.save()
if(source == 'LinkedIn'):
japp_details = JobPostDetail(job_post = japp, posterInformation = posterInformationJSON, decoratedJobPosting = decoratedJobPostingJSON, topCardV2 = topCardV2JSON)
japp_details.save()
except errors.HttpError as error:
print('An error occurred: %s' % error)
def get_emails_with_custom_query(service, user_id, query=''):
"""List all Messages of the user's mailbox matching the query.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
query: String used to filter messages returned.
Eg.- 'from:user@some_domain.com' for Messages from a particular sender.
Returns:
List of Messages that match the criteria of the query. Note that the
returned list contains Message IDs, you must use get with the
appropriate ID to get the details of a Message.
"""
try:
response = service.users().messages().list(userId=user_id,
q=query, includeSpamTrash=True).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(userId=user_id, q=query,
pageToken=page_token, includeSpamTrash=True).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError as error:
print('An error occurred: %s' % error)
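# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes `service` is an already-authorized Gmail API client, like the one
# built in fetchJobApplications() below, and reuses the example query format
# from the docstring above. Note that get_emails_with_custom_query returns
# None when the API call raises an HttpError.
def _example_count_messages_from_sender(service):
    """Hypothetical helper: count messages from a particular sender."""
    messages = get_emails_with_custom_query(service, 'me',
                                            'from:user@some_domain.com')
    return len(messages or [])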
def fetchJobApplications(user):
time_string = ''
    #checks the user's last update time and adds it as a query parameter
    profile = Profile.objects.get(user=user)
    if profile.gmail_last_update_time != 0:
        time_string = ' AND after:' + str(profile.gmail_last_update_time)
        print("it's not the first time, so the query will be restricted: " + time_string)
    else:
        print("it's the first time, so we are querying all mails")
try:
#initiates Gmail API
usa = user.social_auth.get(provider='google-oauth2')
cre = Credentials(usa)
GMAIL = build('gmail', 'v1', credentials=cre)
#usa = user.social_auth.get(provider='google-oauth2')
#strategy = load_strategy()
#usa.refresh_token(strategy)
#creds= Credentials(usa.extra_data['access_token'])
#GMAIL = build('gmail', 'v1', credentials=creds)
        #retrieves the user's emails matching each custom query
linkedInMessages = get_emails_with_custom_query(GMAIL, 'me', 'from:[email protected] AND subject:You applied for' + time_string)# AND after:2018/01/01')
hiredMessages = get_emails_with_custom_query(GMAIL, 'me', 'from:[email protected] AND subject:Interview Request' + time_string)
#vetteryMessages = get_emails_with_custom_query(GMAIL, 'me', 'from:@connect.vettery.com AND subject:Interview Request' + time_string)
indeedMessages = get_emails_with_custom_query(GMAIL, 'me', 'from:[email protected] AND subject:Indeed Application' + time_string)
        #retrieves each matching email's details one by one
for message in linkedInMessages:
get_email_detail(GMAIL, 'me', message['id'], user, 'LinkedIn')
for message in hiredMessages:
get_email_detail(GMAIL, 'me', message['id'], user, 'Hired.com')
for message in indeedMessages:
get_email_detail(GMAIL, 'me', message['id'], user, 'Indeed')
#for message in vetteryMessages:
# GetMessage(GMAIL, 'me', message['id'], user, 'Vettery')
#updates user last update time after all this
now = datetime.utcnow().timestamp()
profile.gmail_last_update_time = now
profile.save()
except Exception as error:
print('An error occurred: %s' % error) |
the-stack_0_20311 | # Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if not root:
return ''
stack = [root]
parts = []
while stack:
curr = stack.pop()
if curr is None:
parts.append(')')
else:
parts.append('%s(' % curr.val)
stack.append(None)
if curr.left:
stack.append(curr.left)
if curr.right:
stack.append(curr.right)
return ''.join(parts)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
if not data:
return None
num = ''
stack = []
for char in data:
if char == '(':
curr = TreeNode(int(num))
num = ''
if stack:
parent = stack[-1]
if curr.val < parent.val:
parent.left = curr
else:
parent.right = curr
else:
root = curr
stack.append(curr)
elif char == ')':
stack.pop()
else:
num += char
return root
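# Round-trip sketch (added for illustration, not part of the original file).
# Note that deserialize() places each value by comparing it with its parent,
# so this codec relies on the binary-search-tree ordering property.
if __name__ == '__main__':
    root = TreeNode(4)
    root.left = TreeNode(2)
    root.right = TreeNode(6)
    root.left.left = TreeNode(1)
    codec = Codec()
    restored = codec.deserialize(codec.serialize(root))
    assert restored.left.val == 2 and restored.left.left.val == 1
    assert restored.right.val == 6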
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root)) |
the-stack_0_20312 | import os
import logging
from recommender.infrastructure.lastfm import LastFMListeningRepository
from recommender.infrastructure.http.flask.server import FlaskServer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
level=getattr(logging, os.getenv("LOG_LEVEL", "INFO"))
)
app = FlaskServer().app
repository = LastFMListeningRepository(
os.environ["LASTFM_API_KEY"],
os.environ["LASTFM_API_SECRET"],
os.environ["LASTFM_USERNAME"]
)
tracks = repository.get_tracks()
if __name__ == "__main__":
print(tracks)
|
the-stack_0_20314 | import discord
import asyncio
import youtube_dl
import aiohttp
from discord.ext import commands
from discord.ext.commands import MissingRequiredArgument
from fuzzywuzzy import process, fuzz
from reciters import get_surah_reciter, get_ayah_reciter, everyayah_reciters
RECITATION_NOT_FOUND = ":x: **Could not find a recitation for the surah by this reciter.** Try a different surah."
RECITER_NOT_FOUND = ":x: **Couldn't find reciter!** Type `-reciters` for a list of available reciters."
SURAH_NOT_FOUND = ":x: **Surah not found.** Use the surah's name or number. Examples: \n\n`-qplay surah" \
" al-fatihah`\n\n`-qplay surah 1`"
PAGE_NOT_FOUND = ":x: **Sorry, the page must be between 1 and 604.**"
DISCONNECTED = ":white_check_mark: **Successfully disconnected.**"
INVALID_VOLUME = ":x: **The volume must be between 0 and 100.**"
INVALID_VERSE = ":x: **Please provide a verse.** For example, 1:2 is Surah al-Fatiha, ayah 2."
NON_EXISTENT_VERSE = ":x: **There are only {} verses in this surah.**"
ALREADY_PLAYING = ":x: **Already playing**. To stop playing, type `-qstop`."
NOT_PLAYING = ":x: The bot is not playing."
RESUMED = ":arrow_forward: **Resumed**."
PAUSED = ":pause_button: **Paused**."
NO_PRIVATE_MESSAGES = "Sorry, the bot cannot be used in DMs."
ytdl_format_options = {
'format': 'bestaudio/best',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0', # bind to ipv4 since ipv6 addresses cause issues sometimes
}
ffmpeg_options = {
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
async def get_surah_names():
async with aiohttp.ClientSession() as session:
async with session.get('http://api.quran.com/api/v3/chapters') as r:
data = await r.json()
surahs = data['chapters']
surah_names = {}
for surah in surahs:
surah_names[surah['name_simple'].lower()] = surah['id']
return surah_names
async def get_surah_id_from_name(surah_name):
surah_names = await get_surah_names()
surah_id = surah_names[surah_name]
return surah_id
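# Usage note (added for clarity): both helpers above are coroutines and must be
# awaited, e.g. `await get_surah_id_from_name('al-fatihah')` is expected to
# resolve to 1, matching the `-qplay surah al-fatihah` / `-qplay surah 1`
# examples in SURAH_NOT_FOUND above.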
class YTDLSource(discord.PCMVolumeTransformer):
def __init__(self, source, *, data, volume=0.5):
super().__init__(source, volume)
self.data = data
self.title = data.get('title')
self.url = data.get('url')
@classmethod
async def from_url(cls, url, *, loop=None, stream=False):
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
filename = data['url'] if stream else ytdl.prepare_filename(data)
return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
class Quran(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.voice_states = {}
self.session = aiohttp.ClientSession(loop=bot.loop)
self.info_url = 'http://api.quran.com/api/v3/chapters/{}'
self.reciter_info_url = 'http://mp3quran.net/api/_english.php'
self.radio_url_1 = 'https://Qurango.net/radio/tarateel'
self.page_url = 'https://everyayah.com/data/{}/PageMp3s/Page{}.mp3'
self.ayah_url = 'https://everyayah.com/data/{}/{}.mp3'
self.mushaf_url = 'https://www.searchtruth.org/quran/images1/{}.jpg'
async def cog_command_error(self, ctx: commands.Context, error: commands.CommandError):
await ctx.send(':x: **Error**: *{}*'.format(str(error)))
print(error)
def make_page_url(self, page, reciter):
try:
url_reciter = everyayah_reciters[reciter]
except KeyError:
return None
url_page = str(page).zfill(3)
url = self.page_url.format(url_reciter, url_page)
return url, url_page
async def get_surah_info(self, surah):
async with self.session.get(self.info_url.format(surah)) as r:
data = await r.json()
name = data['chapter']['name_simple']
arabic_name = data['chapter']['name_arabic']
return name, arabic_name
@staticmethod
def get_surah_file(url, surah):
file_name = str(surah).zfill(3) + '.mp3'
file_url = f'{url}/{file_name}'
return file_url
@staticmethod
def get_ayah_file(reciter, surah, ayah):
file_name = str(surah).zfill(3) + str(ayah).zfill(3) + '.mp3'
file_url = f'{reciter.ayah_url}/{file_name}'
return file_url
async def get_verse_count(self, surah):
async with self.session.get(self.info_url.format(surah)) as r:
data = await r.json()
verses_count = data['chapter']['verses_count']
verses_count = int(verses_count)
return verses_count
def make_embed(self, title, description, footer, colour, image=None):
em = discord.Embed(title=title, colour=colour, description=description)
em.set_footer(text=footer)
if image is not None:
em.set_image(url=image)
return em
async def create_player(self, ctx, url):
try:
player = await YTDLSource.from_url(url, loop=self.bot.loop, stream=True)
except:
return await ctx.send(RECITATION_NOT_FOUND)
self.voice_states[ctx.guild.id] = player
try:
ctx.voice_client.play(player, after=lambda x: asyncio.run_coroutine_threadsafe(ctx.voice_client.disconnect()
, self.bot.loop))
except discord.errors.ClientException as e:
return print(e)
@commands.group()
async def qplay(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send('**Invalid arguments**. For help, type `-qhelp qplay`.')
@qplay.command()
async def surah(self, ctx, surah, *, reciter: str = 'Mishary Alafasi'):
try:
surah = int(surah)
except ValueError:
try:
surah = await get_surah_id_from_name(surah.lower())
# We try to suggest a correction if an invalid surah name string is given.
except KeyError:
surah_names = await get_surah_names()
result = process.extract(surah, surah_names.keys(), scorer=fuzz.partial_ratio, limit=1)
if result is not None:
await ctx.send(f'Closest match: *{result[0][0]}*')
surah = await get_surah_id_from_name(result[0][0].lower())
else:
raise commands.CommandError(SURAH_NOT_FOUND)
reciter = await get_surah_reciter(reciter.lower())
if reciter is None:
return await ctx.send(RECITER_NOT_FOUND)
if not 0 < surah <= 114:
return await ctx.send(SURAH_NOT_FOUND)
file_url = self.get_surah_file(reciter.server, surah)
await self.create_player(ctx, file_url)
transliterated_surah, arabic_surah = await self.get_surah_info(surah)
description = f'**Playing**: Surah {transliterated_surah} ({arabic_surah}).\n**Reciter:** {reciter.name}.' \
f'\n**Riwayah**: *{reciter.riwayah}*'
em = self.make_embed("Qurʼān", description, f'Requested by {ctx.message.author}', 0x006400)
await ctx.send(embed=em)
@qplay.command()
async def ayah(self, ctx, ref: str, *, reciter: str = 'Mishary Alafasi'):
try:
surah, ayah = ref.split(':')
surah = int(surah)
ayah = int(ayah)
except:
return await ctx.send("Invalid arguments. Commands: `-qplay ayah <surah>:<ayah> <reciter>`."
"\n\nExample: `-qplay ayah 2:255 abdul rahman al-sudais`.")
reciter = await get_ayah_reciter(reciter.lower())
if reciter is None:
return await ctx.send(RECITER_NOT_FOUND)
if not 0 < surah <= 114:
return await ctx.send(SURAH_NOT_FOUND)
verse_count = await self.get_verse_count(surah)
if ayah > verse_count:
return await ctx.send(NON_EXISTENT_VERSE.format(verse_count))
url = self.get_ayah_file(reciter, surah, ayah)
await self.create_player(ctx, url)
transliterated_surah, arabic_surah = await self.get_surah_info(surah)
description = f'**Playing**: Surah {transliterated_surah} ({arabic_surah}), Ayah {ayah}. ' \
f'\n**Reciter**: {reciter.name} *({reciter.mushaf_type})*\n**Riwayah**: *{reciter.riwayah}*'
em = self.make_embed("Qurʼān", description, f'Requested by {ctx.message.author}', 0x006400,
f'https://everyayah.com/data/QuranText_jpg/{surah}_{ayah}.jpg')
await ctx.send(embed=em)
@qplay.command()
async def page(self, ctx, page: int, *, reciter: str = 'mishary al-afasy'):
try:
page = int(page)
except:
return await ctx.send("Invalid arguments. Commands: `-qpage <page>:<ayah> <reciter>`."
"\n\nExample: `-qayah 604 abdul rahman al-sudais`.")
reciter = reciter.lower()
readable_reciter = reciter.replace('-', ' - ').title().replace(' - ', '-')
if reciter not in everyayah_reciters:
return await ctx.send(RECITER_NOT_FOUND)
if not 0 < page <= 604:
return await ctx.send(PAGE_NOT_FOUND)
url, url_page = self.make_page_url(page, reciter)
await self.create_player(ctx, url)
description = f'**Playing**: Page {page}.\n**Reciter**: {readable_reciter}.'
em = self.make_embed("Qurʼān", description, f'Requested by {ctx.message.author}', 0x006400,
f'https://www.searchtruth.org/quran/images2/large/page-{url_page}.jpeg')
await ctx.send(embed=em)
@surah.error
@ayah.error
@page.error
async def error_handler(self, ctx, error):
if isinstance(error, MissingRequiredArgument):
await ctx.send("You typed the command wrongly. Type `-qhelp qplay` for help.")
@commands.command()
async def qstop(self, ctx):
voice_client = discord.utils.get(ctx.bot.voice_clients, guild=ctx.guild)
if voice_client is not None:
await voice_client.disconnect()
await ctx.send(DISCONNECTED)
else:
await ctx.send(NOT_PLAYING)
@commands.command()
async def qpause(self, ctx):
voice_client = discord.utils.get(ctx.bot.voice_clients, guild=ctx.guild)
if voice_client is not None and voice_client.is_playing():
voice_client.pause()
await ctx.send(PAUSED)
@commands.command()
async def qresume(self, ctx):
voice_client = discord.utils.get(ctx.bot.voice_clients, guild=ctx.guild)
if voice_client is not None and voice_client.is_paused():
voice_client.resume()
await ctx.send(RESUMED)
@commands.command()
async def qlive(self, ctx, *, link: str = 'short recitations'):
link = link.lower()
if link == 'short recitations':
player = await YTDLSource.from_url(self.radio_url_1, loop=self.bot.loop, stream=True)
ctx.voice_client.play(player)
await ctx.send("Now playing **mp3quran.net radio**: *short recitations* (الإذاعة العامة - اذاعة متنوعة لمختلف القراء).")
@commands.command(name="qvolume")
async def qvolume(self, ctx, volume: int):
if not 0 <= volume <= 100:
return await ctx.send(INVALID_VOLUME)
if ctx.voice_client is None:
return await ctx.send("Not connected to a voice channel.")
ctx.voice_client.source.volume = volume / 100
await ctx.send(f"Changed volume to **{volume}%**.")
@ayah.before_invoke
@surah.before_invoke
@page.before_invoke
@qlive.before_invoke
async def join_voice(self, ctx):
if not ctx.author.voice or not ctx.author.voice.channel:
raise commands.CommandError('You are not connected to any voice channel.')
elif ctx.voice_client:
if ctx.voice_client.channel != ctx.author.voice.channel:
raise commands.CommandError('Bot is already in a voice channel.')
elif ctx.voice_client.is_playing():
raise commands.CommandError('Bot is already playing.')
else:
await ctx.author.voice.channel.connect()
# Leave empty voice channels to conserve bandwidth.
#@commands.Cog.listener()
#async def on_voice_state_update(self, _, before, after):
#if after.channel is None:
#if len(before.channel.members) == 1 and self.bot.user in before.channel.members:
#voice_client = discord.utils.get(self.bot.voice_clients, guild=before.channel.guild)
#if voice_client is not None:
#await voice_client.disconnect()
def setup(bot):
bot.add_cog(Quran(bot))
|
the-stack_0_20317 | import scipy.ndimage as ndi
import numpy as np
from cv2.ximgproc import guidedFilter
from matplotlib import pyplot as plt
from ietk.methods.sharpen_img import sharpen
from ietk import util
from ietk.data import IDRiD
def solvet(I, A, use_gf=True, fsize=(5,5)):
z = 1-ndi.minimum_filter((I/A).min(-1), fsize)
if use_gf:
z = gf(I, z)
rv = z.reshape(*I.shape[:2], 1)
return rv
# def solvet_perchannel(I, A, use_gf=True, fsize=(5,5,0)):
# z = 1-ndi.minimum_filter((I/A), fsize)
# if use_gf:
# z = gf(I, z)
# return z
def solvetmax(I, A):
z = 1-ndi.maximum_filter((I/A).max(-1), (5, 5))
return gf(I, z).reshape(*I.shape[:2], 1)
def solveJ(I, A, t):
epsilon = max(np.min(t)/2, 1e-8)
return (I-A)/np.maximum(t, epsilon) + A
def gf(guide, src, r=100, eps=1e-8):
return guidedFilter(guide.astype('float32'), src.astype('float32'), r, eps).astype('float64')
def ta(img, ignore_ch=None, **kws):
if ignore_ch is not None:
I = img.copy()
I[:,:,ignore_ch] = 0
else:
I = img
    return solvet(1-I, 1, **kws)
def td(img, ignore_ch=None, **kws):
if ignore_ch is not None:
I = img.copy()
I[:,:,ignore_ch] = 0
else:
I = img
    return 1-solvet(1-I, 1, **kws)
def tb(img, ignore_ch=None, **kws):
if ignore_ch is not None:
I = img.copy()
I[:,:,ignore_ch] = 1
else:
I = img
return solvet(I, 1, **kws)
def tc(img, ignore_ch=None, **kws):
if ignore_ch is not None:
I = img.copy()
I[:,:,ignore_ch] = 1
else:
I = img
return 1-solvet(I, 1, **kws)
def A(img):
return solveJ(img, 0, ta(img))
def B(img):
return solveJ(img, 0, tb(img))
def C(img):
return solveJ(img, 0, tc(img))
def D(img):
return solveJ(img, 0, td(img))
def W(img):
return solveJ(img, 1, ta(img))
def X(img):
return solveJ(img, 1, tb(img))
def Y(img):
return solveJ(img, 1, tc(img))
def Z(img):
return solveJ(img, 1, td(img))
def B_ret(img):
"""specific to retinal fundus images, where blue channel is too sparse"""
return solveJ(img, 0, tb(img, ignore_ch=2))
def C_ret(img):
"""specific to retinal fundus images, where blue channel is too sparse"""
return solveJ(img, 0, tc(img, ignore_ch=2))
def X_ret(img):
"""specific to retinal fundus images, where blue channel is too sparse"""
return solveJ(img, 1, tb(img, ignore_ch=2))
def Y_ret(img):
"""specific to retinal fundus images, where blue channel is too sparse"""
return solveJ(img, 1, tc(img, ignore_ch=2))
def brighten_darken(img, method_name: str, focus_region=None,
fundus_image: bool=True):
"""
Apply a brightening or darkening method, following the ICIAR2020 paper,
Enhancement of Retinal Fundus Images via Pixel Color Amplification.
`img`: a (h,w,c) fundus image normalized into [0,1] range. The
number of color channels, c > 0.
    `method_name: str` - any combination of the individual letters
'ABCDWXYZ', each optionally prefixed by 's' and separated by a '+'. The
's' performs sharpening. Each of the other letters refers to a brightening
(A,B,C or D) or darkening (W,X,Y,Z) operation. See the Pixel Color
Amplification paper for details (ICIAR 2020).
Example method names you can try: 'A+B+W+X' or 'sA+sC+sX+sZ'
`focus_region` - a foreground boolean mask specifying which pixels of image
to sharpen
`fundus_image: bool` When True, ensures that the methods B,C,X,Y ignore the
blue channel. This assumes the given input image is in RGB format.
The blue channel in fundus images gives a dark channel that is very noisy.
Side Note: If you want to brighten or darken different image domains,
you're probably going to want to optimize the neighborhood size in
solvet(fsize=(...)) and the guided filter parameters gf(...). In this
case, you should just build your own function using solvet and solveJ
directly.
"""
func_names = method_name.split('+')
if fundus_image:
_methods = dict(zip('ABCDWXYZ', [A,B_ret,C_ret,D,W,X_ret,Y_ret,Z]))
else:
_methods = dict(zip('ABCDWXYZ', [A,B,C,D,W,X,Y,Z]))
I2 = np.zeros_like(img)
for func_name in func_names:
tmp = _methods[func_name.lstrip('s')](img)
if func_name.startswith('s'):
tmp = sharpen(tmp, ~focus_region)
I2 += tmp
I2 /= len(func_names)
return I2
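# Minimal usage sketch (illustrative, not part of the original module). It
# assumes `img` is an RGB fundus image scaled into [0, 1], as the docstring
# above requires, and reuses the foreground mask helper that the __main__
# block below also uses.
def _example_enhance(img):
    cropped, fg = util.center_crop_and_get_foreground_mask(img)
    # 'sA+sX' averages a sharpened brightening (A) and a sharpened darkening (X).
    return brighten_darken(cropped, 'sA+sX', focus_region=fg)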
def resizeforplot(img):
import PIL
size = (np.array(img.shape)/2).astype('int')
return np.asarray(PIL.Image.fromarray((img.clip(0, 1)*255).astype('uint8')).resize(
(size[1], size[0])))
if __name__ == "__main__":
import os
os.makedirs('data/plots/brighten_darken/', exist_ok=True) # save dir
# load an image
dset = IDRiD('./data/IDRiD_segmentation')
img_id, img, labels = dset.sample()
print("using image", img_id)
# img, labels = dset['IDRiD_46']
# he = labels['HE']
# ma = labels['MA']
# ex = labels['EX']
# se = labels['SE']
# od = labels['OD']
# set background pure black.
I = img.copy()
# I = img[1500:2500, 1500:2500, :]
# labels = {k: v[1500:2500, 1500:2500] for k, v in labels.items()}
# bg = np.zeros_like(I, dtype='bool')
I, fg = util.center_crop_and_get_foreground_mask(I)
bg = ~fg
I[bg] = 0
# four transmission maps, for retinal images
a,b,c,d = [ta(I), tb(I, ignore_ch=2), tc(I, ignore_ch=2), td(I)]
# a = solvet(1-I, 1) # == 1-solvetmax(I, 1)
# d = 1-solvet(1-I, 1) # == solvetmax(I, 1)
# I2 = I.copy()
# I2[:,:,2] = 1 # the min values of blue channel is too noise
# c = 1-solvet(I2, 1) # == solvetmax(1-I, 1)
# b = solvet(I2, 1) # == 1-solvetmax(1-I, 1)
# # a = solvet(1-I, 1, False, (50,20)) # == 1-solvetmax(I, 1)
# # d = 1-solvet(1-I, 1, False, (50,20)) # == solvetmax(I, 1)
# # c = 1-solvet(I, 1, False, (50,20)) # == solvetmax(1-I, 1)
# # b = solvet(I, 1, False, (50,20)) # == 1-solvetmax(1-I, 1)
# Brighten
A, B, C, D = A(I), B_ret(I), C_ret(I), D(I)
# Darken
W, X, Y, Z = W(I), X_ret(I), Y_ret(I), Z(I)
# Plots
print('plotting')
kws = dict(bt='\mathbf{t}', bI='\mathbf{I}', bA='\mathbf{A}')
t_eqs = [
r'${bt} = $solve_t$(1-{bI}, {bA}=1)$'.format(**kws),
r'${bt} = $solve_t$({bI}, {bA}=1)$'.format(**kws),
r'${bt} = 1-$solve_t$({bI}, {bA}=1)$'.format(**kws),
r'${bt} = 1-$solve_t$(1-{bI}, {bA}=1)$'.format(**kws),
]
f, axs = plt.subplots(2,2, num=1, figsize=(10,10))
f.suptitle('Transmission maps', fontsize=28)
axs2 = []
for n,(ax, t) in enumerate(zip(axs.ravel(), [a,b,c,d])):
ax.imshow(resizeforplot(t.squeeze()), cmap='gray', interpolation='none')
# ax.axis('off')
# ax.xaxis.set_label_position('top')
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
# tmpax = ax.twiny()
# tmpax.xaxis.set_ticks([])
# tmpax.xaxis.set_label_position('bottom')
# axs2.append(tmpax)
axs[0,0].set_ylabel('Weak amplification', fontsize=20)
axs[1,0].set_ylabel('Strong amplification', fontsize=20)
axs[1,0].set_xlabel('Amplifying dark regions', fontsize=20)
axs[1,1].set_xlabel('Amplifying bright regions', fontsize=20)
# axs2[0].set_xlabel(t_eqs[0], fontsize=18)
# axs2[1].set_xlabel(t_eqs[1], fontsize=18)
# axs2[2].set_xlabel(t_eqs[2], fontsize=18)
# axs2[3].set_xlabel(t_eqs[3], fontsize=18)
axs.ravel()[0].set_title(t_eqs[0], fontsize=20)
axs.ravel()[1].set_title(t_eqs[1], fontsize=20)
axs.ravel()[2].set_title(t_eqs[2], fontsize=20)
axs.ravel()[3].set_title(t_eqs[3], fontsize=20)
f.tight_layout()
f.subplots_adjust(wspace=0.02, top=0.92)
f.savefig('./data/plots/brighten_darken/amplifier_t.png', bbox_inches='tight')
f2, ax = plt.subplots(num=2, figsize=(10,10))
f2.suptitle('Source Image', fontsize=28)
# ax.imshow(resizeforplot(sharpen(I, bg)), interpolation='none')
ax.imshow(resizeforplot(I), interpolation='none')
ax.axis('off')
f2.tight_layout()
f2.subplots_adjust(wspace=0.02, hspace=0.02, top=0.92)
f2.savefig('./data/plots/brighten_darken/amplifier_I.png', bbox_inches='tight')
f3, axs = plt.subplots(2,2, num=3, figsize=(10,10))
f3.suptitle(r'Whole Image Brightening: $\mathbf{J} = \frac{\mathbf{I}-\mathbf{0}}{\mathbf{t}} + \mathbf{0}$', fontsize=28)
f3b, axs3b = plt.subplots(2,2, num=33, figsize=(10,10))
f3b.suptitle(r'Whole Image Brightening followed by Sharpening', fontsize=28)
letter1 = ['A.', 'B.', 'C.', 'D.']
for n,(ax, ax3b, t) in enumerate(zip(axs.ravel(), axs3b.ravel(), [a,b,c,d])):
J = solveJ(I, 0, t)
J3b = sharpen(J, bg)
ax.imshow(resizeforplot(J), interpolation='none')
ax.set_title(f'{letter1[n]} {t_eqs[n]}', fontsize=22)
ax.axis('off')
ax3b.axis('off')
ax3b.imshow(resizeforplot(J3b))
ax3b.set_title(f's{letter1[n]}', fontsize=22)
f3.tight_layout()
f3.subplots_adjust(wspace=0.02, hspace=0.02, top=.9)
f3.savefig('data/plots/brighten_darken/amplifier_b.png', bbox_inches='tight')
f3b.tight_layout()
f3b.subplots_adjust(wspace=0.02, hspace=0.02, top=.9)
f3b.savefig('data/plots/brighten_darken/amplifier_b_sharpen.png', bbox_inches='tight')
f4, axs = plt.subplots(2,2, num=4, figsize=(10,10))
f4.suptitle(r'Whole Image Darkening: $\mathbf{J} = \frac{\mathbf{I}-\mathbf{1}}{\mathbf{t}} + \mathbf{1}$', fontsize=28)
f4b, axs4b = plt.subplots(2,2, num=44, figsize=(10,10))
f4b.suptitle(r'Whole Image Darkening followed by Sharpening', fontsize=28)
letter2 = ['W.', 'X.', 'Y.', 'Z.']
for n,(ax, ax4b, t) in enumerate(zip(axs.ravel(), axs4b.ravel(), [a,b,c,d])):
J = solveJ(I, 1, t)
J4b = sharpen(J, bg)
ax.imshow(resizeforplot(J), interpolation='none')
ax.set_title(f'{letter2[n]} {t_eqs[n]}', fontsize=22)
ax.axis('off')
ax4b.axis('off')
ax4b.imshow(resizeforplot(J4b))
ax4b.set_title(f's{letter2[n]}', fontsize=22)
f4.tight_layout()
f4.subplots_adjust(wspace=0.02, hspace=0.02, top=.9)
f4.savefig('data/plots/brighten_darken/amplifier_d.png', bbox_inches='tight')
f4b.tight_layout()
f4b.subplots_adjust(wspace=0.02, hspace=0.02, top=.9)
f4b.savefig('data/plots/brighten_darken/amplifier_d_sharpen.png', bbox_inches='tight')
plt.show(block=False)
# Extra visuals showing effects of composing images together. compute
# intensive.
# # Brighten and darken under parallel composition by avg.
# from mpl_toolkits.axes_grid1.axes_grid import ImageGrid
# from itertools import product
# f5 = plt.figure(figsize=(10,10), num=5)
# grid = ImageGrid(f5, 111, (4, 4))
# for i, (bright, dark) in enumerate(product([A,B,C,D], [W,X,Y,Z])):
# grid[i].imshow(resizeforplot(sharpen(bright/2+dark/2, bg)))
# # grid[i].imshow(resizeforplot(bright/2+dark/2))
# f5.savefig('data/plots/brighten_darken/compose_parallel_avg.png', bbox_inches='tight')
# # # composition AZ and ZA
# bd = (solveJ(i, a, t) for i in [A,B,C,D] for a in [1] for t in [a,b,c,d])
# db = (solveJ(i, a, t) for i in [W,X,Y,Z] for a in [0] for t in [a,b,c,d])
# from mpl_toolkits.axes_grid1.axes_grid import ImageGrid
# f6 = plt.figure(figsize=(10,10), num=6)
# f7 = plt.figure(figsize=(10,10), num=7)
# grid6 = ImageGrid(f6, 111, (4, 4))
# grid7 = ImageGrid(f7, 111, (4, 4))
# for i, (b, d) in enumerate(zip(bd, db)):
# # grid6[i].imshow(resizeforplot(sharpen(b, bg)))
# # grid7[i].imshow(resizeforplot(sharpen(d, bg)))
# grid6[i].imshow(resizeforplot(b))
# grid7[i].imshow(resizeforplot(d))
# f6.savefig('data/plots/brighten_darken/compose_series_bd.png', bbox_inches='tight')
# f7.savefig('data/plots/brighten_darken/compose_series_db.png', bbox_inches='tight')
plt.show(block=False)
|
the-stack_0_20319 | # Originally written by Eric Martel ([email protected] / www.ericmartel.com)
# Improved by Wade Brainerd ([email protected] / www.wadeb.com)
import sublime
import sublime_plugin
import fnmatch, os, re, sys
from math import *
# http://stackoverflow.com/questions/11301138/how-to-check-if-variable-is-string-with-python-2-and-3-compatibility
try:
isinstance("", basestring)
def isstr(s):
return isinstance(s, basestring)
except NameError:
def isstr(s):
return isinstance(s, str)
directory = os.path.dirname(os.path.realpath(__file__))
sys.path.append(directory)
from tinynumpy import tinynumpy
try:
import numpy
except ImportError:
print("=== NumPy disabled, using TinyNumPy instead ===")
print("To enable cell evaluation using the full NumPy, download NumPy from:")
print(" https://pypi.python.org/pypi/numpy")
print("and install it into Sublime Text's Packages directory.")
print("For information on the features and limitations of TinyNumPy, visit:")
print(" https://github.com/wadetb/tinynumpy")
print("======================")
numpy = tinynumpy
class SortDirection:
Ascending = 1
Descending = 2
class CSVValue:
def __init__(self, text, first_char_index=0, last_char_index=0):
self.text = text
self.first_char_index = first_char_index
self.last_char_index = last_char_index
def AsFloat(self):
try:
return True, float(self.text)
except ValueError:
return False, None
def Compare(self, other):
a_is_float, a_float = self.AsFloat()
b_is_float, b_float = other.AsFloat()
if a_is_float and b_is_float:
return a_float - b_float
if self.text > other.text:
return 1
if self.text < other.text:
return -1
return 0
def __lt__(self, other): return self.Compare(other) < 0
def __eq__(self, other): return self.Compare(other) == 0
class CSVMatrix:
def __init__(self, view):
self.rows = []
self.num_columns = 0
self.valid = False
self.view = view
self.settings = sublime.load_settings('AdvancedCSV.sublime-settings')
self.ChooseDelimiter()
self.auto_quote = self.GetViewOrUserSetting( 'auto_quote', True )
def GetViewOrUserSetting(self, name, default):
if self.view.settings().has(name):
return self.view.settings().get(name)
else:
return self.settings.get(name, default)
def ChooseDelimiter(self):
self.delimiter = None
# Highest priority: per-view saved setting (CSV -> Set Delimiter).
if self.view.settings().has('delimiter'):
self.delimiter = self.view.settings().get('delimiter')
# Second highest priority: filename-based matching
if not self.delimiter:
filename = self.view.file_name()
if filename:
self.delimiter_mapping = self.settings.get('delimiter_mapping', {})
for k, v in self.delimiter_mapping.items():
if fnmatch.fnmatch(filename, k):
self.delimiter = v
break
# Final priority: user or system setting, fallback to comma.
if not self.delimiter:
self.delimiter = self.settings.get('delimiter', ',')
# Special case for recognizing '\t' for tabs.
if self.delimiter == '\\t':
self.delimiter = '\t'
if not isstr(self.delimiter) or len(self.delimiter) != 1:
print("'{0}' is not a valid delimiter, reverting to ','.".format(self.delimiter))
self.delimiter = ','
print("Using delimiter: '{0}'.".format(self.delimiter))
def AddRow(self, row):
self.rows.append(row)
def Finalize(self):
if not len(self.rows):
return
self.num_columns = 0
for row in self.rows:
if len(row) > self.num_columns:
self.num_columns = len(row)
self.valid = True
@staticmethod
def GetCellValue(row, column_index):
try:
return row[column_index]
except IndexError:
return CSVValue('')
def SortByColumn(self, column_index, direction, use_header):
class Compare:
def __init__(self, row): self.value = CSVMatrix.GetCellValue(row, column_index)
def __lt__(self, other): return self.value < other.value
def __eq__(self, other): return self.value == other.value
reverse = direction == SortDirection.Descending
if use_header:
self.rows[1:] = sorted(self.rows[1:], key=lambda row: Compare(row), reverse=reverse)
else:
self.rows.sort(key=lambda row: Compare(row), reverse=reverse)
def InsertColumn(self, column_index):
for row in self.rows:
if column_index <= len(row):
row.insert(column_index, CSVValue(''))
def DeleteColumn(self, column_index):
for row in self.rows:
if column_index < len(row):
row.pop(column_index)
def DeleteTrailingColumns(self, column_index):
for row in self.rows:
last_column_index = 0
for column_index, value in enumerate(row):
if len(value.text.strip()) > 0:
last_column_index = column_index
first_empty_column_index = last_column_index + 1
del row[first_empty_column_index:]
def SelectColumn(self, column_index, view):
view.sel().clear()
for row_index, row in enumerate(self.rows):
if column_index < len(row):
value = row[column_index]
a = view.text_point(row_index, value.first_char_index)
b = view.text_point(row_index, value.last_char_index)
region = sublime.Region(a, b)
view.sel().add(region)
@staticmethod
def SaveSelection(view):
saved_selection = []
for region in view.sel():
a_row, a_col = view.rowcol(region.a)
b_row, b_col = view.rowcol(region.b)
rowcol_region = (a_row, a_col, b_row, b_col)
saved_selection.append(rowcol_region)
return saved_selection
@staticmethod
def RestoreSelection(view, saved_selection):
view.sel().clear()
for rowcol_region in saved_selection:
a = view.text_point(rowcol_region[0], rowcol_region[1])
b = view.text_point(rowcol_region[2], rowcol_region[3])
region = sublime.Region(a, b)
view.sel().add(region)
def QuoteText(self, text):
if not self.auto_quote:
return text
if self.delimiter in text or '"' in text:
return '"' + text.replace('"', '""') + '"'
else:
return text
def MeasureColumns(self):
self.column_widths = [0] * self.num_columns
for row in self.rows:
for column_index, value in enumerate(row):
text = self.QuoteText(value.text)
width = len(text)
if width > self.column_widths[column_index]:
self.column_widths[column_index] = width
def Format(self):
output = ''
for row_index, row in enumerate(self.rows):
row_text = ''
for column_index, value in enumerate(row):
quoted_text = self.QuoteText(value.text)
row_text += quoted_text
if column_index < len(row) - 1:
row_text += self.delimiter
output += row_text
if row_index < len(self.rows) - 1:
output += '\n'
return output
def FormatCompacted(self):
output = ''
for row_index, row in enumerate(self.rows):
row_text = ''
for column_index, value in enumerate(row):
quoted_trimmed_text = self.QuoteText(value.text.strip())
row_text += quoted_trimmed_text
if column_index < len(row) - 1:
row_text += self.delimiter
output += row_text
if row_index < len(self.rows) - 1:
output += '\n'
return output
def FormatExpanded(self):
self.MeasureColumns()
output = ''
for row_index, row in enumerate(self.rows):
row_text = ''
for column_index, value in enumerate(row):
quoted_text = self.QuoteText(value.text)
column_width = self.column_widths[column_index]
quoted_padded_text = quoted_text.ljust(column_width)
row_text += quoted_padded_text
if column_index < len(row) - 1:
row_text += self.delimiter
output += row_text
if row_index < len(self.rows) - 1:
output += '\n'
return output
def ParseRow(self, row):
columns = []
currentword = ''
first_char_index = 0
insidequotes = False
char_index = 0
while char_index < len(row):
char = row[char_index]
if insidequotes:
if char == '"':
if char_index < len(row) - 1 and row[char_index + 1] == '"':
if self.auto_quote:
currentword += '"'
else:
currentword += '""'
char_index += 2
continue
insidequotes = False
if not self.auto_quote:
currentword += char
else:
currentword += char
else:
if char == '"':
insidequotes = True
if not self.auto_quote:
currentword += char
elif char == self.delimiter:
columns.append(CSVValue(currentword, first_char_index, char_index))
currentword = ''
first_char_index = char_index + 1
else:
currentword += char
char_index += 1
columns.append(CSVValue(currentword, first_char_index, char_index))
return columns
@staticmethod
def FromView(view):
matrix = CSVMatrix(view)
text = view.substr(sublime.Region(0, view.size()))
for line in text.split("\n"):
row = matrix.ParseRow(line)
matrix.AddRow(row)
matrix.Finalize()
return matrix
def GetColumnIndexFromCursor(self, view):
selection = view.sel()[0]
row_index, col_index = view.rowcol(selection.begin())
if row_index < len(self.rows):
row = self.rows[row_index]
for column_index, value in enumerate(row):
if value.first_char_index > col_index:
return column_index - 1
return len(row) - 1
else:
return 0
EXPRESSION_RE = re.compile(r'''
\s*
(\[
(?P<row_begin_mod>[+-])?
(?P<row_begin>\d+)?
(?P<row_delim>:)?
(?P<row_end_mod>[+-])?
(?P<row_end>\d+)?
(?P<comma>,)?
(?P<column_begin_mod>[+-])?
(?P<column_begin>\d+)?
(?P<column_delim>:)?
(?P<column_end_mod>[+-])?
(?P<column_end>\d+)?
\])?
\s*
(?P<direction>[<>v^])?
\s*
=
\s*
(?P<expression>.+)
''', re.VERBOSE)
def ApplyModifier(self, value, mod, base_value):
if mod == '+':
return base_value + value
elif mod == '-':
return base_value - value
else:
return value
def GetCoordinateRange(self, begin_mod, begin, delim, end_mod, end, base_value):
if delim:
if begin is None:
begin = 0
else:
begin = self.ApplyModifier(int(begin), begin_mod, base_value)
if end is None:
end = len(self.rows)
else:
end = self.ApplyModifier(int(end), end_mod, base_value)
else:
if begin is None:
begin = base_value
end = base_value + 1
else:
begin = self.ApplyModifier(int(begin), begin_mod, base_value)
end = begin + 1
return (begin, end)
def GetRowColumnCoordinateRange(self, coordinate_match, base_row_index, base_column_index):
row_begin_mod = coordinate_match.group('row_begin_mod')
row_begin = coordinate_match.group('row_begin')
row_delim = coordinate_match.group('row_delim')
row_end_mod = coordinate_match.group('row_end_mod')
row_end = coordinate_match.group('row_end')
row_range = self.GetCoordinateRange(row_begin_mod, row_begin, row_delim, row_end_mod, row_end, base_row_index)
column_begin_mod = coordinate_match.group('column_begin_mod')
column_begin = coordinate_match.group('column_begin')
column_delim = coordinate_match.group('column_delim')
column_end_mod = coordinate_match.group('column_end_mod')
column_end = coordinate_match.group('column_end')
column_range = self.GetCoordinateRange(column_begin_mod, column_begin, column_delim, column_end_mod, column_end, base_column_index)
return (row_range[0], row_range[1], column_range[0], column_range[1])
def ApplyDirectionOffsetToRange(self, direction_match, coord_range):
direction = direction_match.group('direction')
if direction == '^':
return (coord_range[0] - 1, coord_range[1] - 1, coord_range[2], coord_range[3])
elif direction == 'v':
return (coord_range[0] + 1, coord_range[1] + 1, coord_range[2], coord_range[3])
elif direction == '<':
return (coord_range[0], coord_range[1], coord_range[2] - 1, coord_range[3] - 1)
elif direction == '>':
return (coord_range[0], coord_range[1], coord_range[2] + 1, coord_range[3] + 1)
else:
return coord_range
def EvaluateExpressionCell(self, m, row_index, column_index, value, expression_match):
target_range = self.GetRowColumnCoordinateRange(expression_match, row_index, column_index)
target_range = self.ApplyDirectionOffsetToRange(expression_match, target_range)
expression = expression_match.group('expression')
# Expand sheet for target range.
while target_range[1] >= len(self.rows):
self.rows.append([])
while target_range[3] >= len(self.column_widths):
self.column_widths.append(0)
for target_row_index in range(target_range[0], target_range[1]):
for target_column_index in range(target_range[2], target_range[3]):
try:
l = {}
l['m'] = m
l['row'] = target_row_index
l['col'] = target_column_index
l['frow'] = row_index
l['fcol'] = column_index
result = eval(str(expression), None, l)
except Exception as e:
print("Exception '{0}' evaluating expression for target cell [{1}, {2}].".format(str(e), target_row_index, target_column_index))
result = str(e)
try:
row = self.rows[target_row_index]
while target_column_index >= len(row):
row.append(CSVValue(''.ljust(self.column_widths[len(row)])))
target_value = self.rows[target_row_index][target_column_index]
target_value.text = str(result).ljust(len(target_value.text))
except IndexError:
print("Invalid expression target cell [{0}, {1}].".format(target_row_index, target_column_index))
def Evaluate(self):
if not numpy:
print("Cannot evaluate without NumPy.")
return
self.MeasureColumns()
dimensions = (len(self.rows), self.num_columns)
m = numpy.zeros(dimensions)
for row_index, row in enumerate(self.rows):
for column_index, value in enumerate(row):
is_float, float_value = value.AsFloat()
if is_float:
m[row_index,column_index] = float_value
for row_index, row in enumerate(self.rows):
for column_index, value in enumerate(row):
expression_match = CSVMatrix.EXPRESSION_RE.match(value.text)
if expression_match:
self.EvaluateExpressionCell(m, row_index, column_index, value, expression_match)
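# Illustrative cell-expression notes (added for documentation; the syntax is
# inferred from EXPRESSION_RE and EvaluateExpressionCell above, so treat these
# as sketches rather than authoritative usage):
# * A cell containing "=m[frow,0] * 2" writes twice the value in column 0 of
#   the same row into the cell holding the formula; expressions are eval()'d
#   with the locals m (NumPy matrix of the sheet), row/col (target cell) and
#   frow/fcol (the cell containing the formula).
# * "[2,3]=m[2,2]+1" targets row 2, column 3 explicitly, while a leading
#   "v", "^", "<" or ">" shifts the target one cell down, up, left or right.
# * Because ParseRow() splits unquoted cells on the delimiter, an expression
#   containing the delimiter character (for example the comma in "m[2,2]")
#   normally has to be quoted in the file.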
class CsvSetOutputCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
if 'output' in args:
self.view.replace(edit, sublime.Region(0, self.view.size()), args['output']);
if 'saved_selection' in args:
CSVMatrix.RestoreSelection(self.view, args['saved_selection'])
class CsvSortByColAscCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.matrix = CSVMatrix.FromView(self.view)
if not self.matrix.valid:
sublime.error_message(__name__ + ": The buffer doesn't appear to be a CSV file")
return
self.saved_selection = self.matrix.SaveSelection(self.view)
self.view.window().show_quick_panel(['Use header row', 'Don\'t use header row'], self.on_select_header_done)
def on_select_header_done(self, picked):
if picked < 0:
return
use_header = picked == 0
column_index = self.matrix.GetColumnIndexFromCursor(self.view)
self.matrix.SortByColumn(column_index, SortDirection.Ascending, use_header)
output = self.matrix.Format()
self.view.run_command('csv_set_output', {'output': output, 'saved_selection': self.saved_selection})
class CsvSortByColDescCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.matrix = CSVMatrix.FromView(self.view)
if not self.matrix.valid:
sublime.error_message(__name__ + ": The buffer doesn't appear to be a CSV file")
return
self.saved_selection = self.matrix.SaveSelection(self.view)
self.view.window().show_quick_panel(['Use header row', 'Don\'t use header row'], self.on_select_header_done)
def on_select_header_done(self, picked):
if picked < 0:
return
use_header = picked == 0
column_index = self.matrix.GetColumnIndexFromCursor(self.view)
self.matrix.SortByColumn(column_index, SortDirection.Descending, use_header)
output = self.matrix.Format()
self.view.run_command('csv_set_output', {'output': output, 'saved_selection': self.saved_selection})
class CsvInsertColCommand(sublime_plugin.TextCommand):
def run(self, edit):
matrix = CSVMatrix.FromView(self.view)
if not matrix.valid:
sublime.error_message(__name__ + ": The buffer doesn't appear to be a CSV file")
return
saved_selection = matrix.SaveSelection(self.view)
column_index = matrix.GetColumnIndexFromCursor(self.view)
matrix.InsertColumn(column_index)
output = matrix.Format()
self.view.replace(edit, sublime.Region(0, self.view.size()), output);
matrix.RestoreSelection(self.view, saved_selection)
class CsvDeleteColCommand(sublime_plugin.TextCommand):
def run(self, edit):
matrix = CSVMatrix.FromView(self.view)
if not matrix.valid:
sublime.error_message(__name__ + ": The buffer doesn't appear to be a CSV file")
return
saved_selection = matrix.SaveSelection(self.view)
column_index = matrix.GetColumnIndexFromCursor(self.view)
matrix.DeleteColumn(column_index)
output = matrix.Format()
self.view.replace(edit, sublime.Region(0, self.view.size()), output);
matrix.RestoreSelection(self.view, saved_selection)
class CsvDeleteTrailingColsCommand(sublime_plugin.TextCommand):
def run(self, edit):
matrix = CSVMatrix.FromView(self.view)
if not matrix.valid:
sublime.error_message(__name__ + ": The buffer doesn't appear to be a CSV file")
return
saved_selection = matrix.SaveSelection(self.view)
column_index = matrix.GetColumnIndexFromCursor(self.view)
matrix.DeleteTrailingColumns(column_index)
output = matrix.Format()
self.view.replace(edit, sublime.Region(0, self.view.size()), output);
matrix.RestoreSelection(self.view, saved_selection)
class CsvSelectColCommand(sublime_plugin.TextCommand):
def run(self, edit):
matrix = CSVMatrix.FromView(self.view)
if not matrix.valid:
sublime.error_message(__name__ + ": The buffer doesn't appear to be a CSV file")
return
column_index = matrix.GetColumnIndexFromCursor(self.view)
matrix.SelectColumn(column_index, self.view)
class CsvFormatCompactCommand(sublime_plugin.TextCommand):
def run(self, edit):
matrix = CSVMatrix.FromView(self.view)
if not matrix.valid:
sublime.error_message(__name__ + ": The buffer doesn't appear to be a CSV file")
return
saved_selection = matrix.SaveSelection(self.view)
output = matrix.FormatCompacted()
self.view.replace(edit, sublime.Region(0, self.view.size()), output);
matrix.RestoreSelection(self.view, saved_selection)
class CsvFormatExpandCommand(sublime_plugin.TextCommand):
def run(self, edit):
matrix = CSVMatrix.FromView(self.view)
if not matrix.valid:
sublime.error_message(__name__ + ": The buffer doesn't appear to be a CSV file")
return
saved_selection = matrix.SaveSelection(self.view)
output = matrix.FormatExpanded()
self.view.replace(edit, sublime.Region(0, self.view.size()), output);
matrix.RestoreSelection(self.view, saved_selection)
class CsvEvaluateCommand(sublime_plugin.TextCommand):
def run(self, edit):
matrix = CSVMatrix.FromView(self.view)
if not matrix.valid:
sublime.error_message(__name__ + ": The buffer doesn't appear to be a CSV file")
return
saved_selection = matrix.SaveSelection(self.view)
matrix.Evaluate()
output = matrix.Format()
self.view.replace(edit, sublime.Region(0, self.view.size()), output);
matrix.RestoreSelection(self.view, saved_selection)
class CsvFormatCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.matrix = CSVMatrix.FromView(self.view)
if not self.matrix.valid:
sublime.error_message(__name__ + ": The buffer doesn't appear to be a CSV file")
return
self.view.window().show_input_panel('Format (ex. the {0} jumped over the {1})', "",
self.on_done, self.on_change, self.on_cancel)
CELL_RE = re.compile(r'{\d+}')
def on_done(self, input):
output = ''
numrows = len(self.matrix.rows)
for rowindex, row in enumerate(self.matrix.rows):
formatted_row = input
for columnindex, column in enumerate(row):
formatted_row = formatted_row.replace('{' + str(columnindex) + '}', str(column.text))
formatted_row = CsvFormatCommand.CELL_RE.sub('', formatted_row)
output += formatted_row
if rowindex < (numrows - 1):
output += '\n'
view = self.view.window().new_file()
view.set_name('Formatted Output')
view.set_scratch(True)
view.run_command('csv_set_output', {'output': output});
def on_change(self, input):
pass
def on_cancel(self):
pass
class CsvSetDelimiterCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.view.window().show_input_panel('Delimiter character', "",
self.on_done, self.on_change, self.on_cancel)
def on_done(self, input):
self.view.settings().set('delimiter', input)
def on_change(self, input):
pass
def on_cancel(self):
pass
|
the-stack_0_20321 | # -*- coding: utf-8 -*-
# yapf:disable
"""Command line interface script to import CIF files from external databases into `CifData` nodes."""
from __future__ import absolute_import
import click
from aiida.cmdline.params import options
from aiida.cmdline.utils import decorators, echo
from . import cmd_data
@cmd_data.group('cif')
def cmd_cif():
"""Commands to import, create and inspect `CifData` nodes."""
@cmd_cif.command('import')
@options.GROUP(help='Group in which to store the raw imported CifData nodes.', required=False)
@click.option(
'-d', '--database', type=click.Choice(['cod', 'icsd', 'mpds']), default='cod', show_default=True,
help='Select the database to import from.')
@click.option(
'-M', '--max-entries', type=click.INT, default=None, show_default=True, required=False,
help='Maximum number of entries to import.')
@click.option(
'-x', '--number-species', type=click.INT, default=None, show_default=True,
help='Import only cif files with this number of different species.')
@click.option(
'-o', '--skip-partial-occupancies', is_flag=True, default=False,
help='Skip entries that have partial occupancies.')
@click.option(
'-S', '--importer-server', type=click.STRING, required=False,
help='Optional server address thats hosts the database.')
@click.option(
'-H', '--importer-db-host', type=click.STRING, required=False,
help='Optional hostname for the database.')
@click.option(
'-D', '--importer-db-name', type=click.STRING, required=False,
help='Optional name for the database.')
@click.option(
'-P', '--importer-db-password', type=click.STRING, required=False,
help='Optional password for the database.')
@click.option(
'-U', '--importer-api-url', type=click.STRING, required=False,
help='Optional API url for the database.')
@click.option(
'-K', '--importer-api-key', type=click.STRING, required=False,
help='Optional API key for the database.')
@click.option(
'-c', '--count-entries', is_flag=True, default=False,
help='Return the number of entries the query yields and exit.')
@click.option(
'-b', '--batch-count', type=click.INT, default=1000, show_default=True,
help='Store imported cif nodes in batches of this size. This reduces the number of database operations '
'but if the script dies before a checkpoint the imported cif nodes of the current batch are lost.')
@click.option(
'-n', '--dry-run', is_flag=True, default=False,
help='Perform a dry-run.')
@options.VERBOSE(help='Print entries that are skipped.')
@decorators.with_dbenv()
def launch_cif_import(group, database, max_entries, number_species, skip_partial_occupancies, importer_server,
importer_db_host, importer_db_name, importer_db_password, importer_api_url, importer_api_key, count_entries,
batch_count, dry_run, verbose):
"""Import cif files from various structural databases, store them as CifData nodes and add them to a Group.
Note that to determine which cif files are already contained within the Group in order to avoid duplication,
the attribute 'source.id' of the CifData is compared to the source id of the imported cif entry. Since there
    is no guarantee that these ids do not overlap between different structural databases and we do not check
explicitly for the database, it is advised to use separate groups for different structural databases.
"""
# pylint: disable=too-many-arguments,too-many-locals,too-many-statements,too-many-branches,import-error
import inspect
from CifFile.StarFile import StarError
from datetime import datetime
from six.moves.urllib.error import HTTPError
from aiida import orm
from aiida.plugins import factories
from aiida_codtools.cli.utils.display import echo_utc
if not count_entries and group is None:
raise click.BadParameter('you have to specify a group unless the option --count-entries is specified')
importer_parameters = {}
    launch_parameters = {}
query_parameters = {}
if importer_server is not None:
importer_parameters['server'] = importer_server
if importer_db_host is not None:
importer_parameters['host'] = importer_db_host
if importer_db_name is not None:
importer_parameters['db'] = importer_db_name
if importer_db_password is not None:
importer_parameters['passwd'] = importer_db_password
if importer_api_url is not None:
importer_parameters['url'] = importer_api_url
if importer_api_key is not None:
importer_parameters['api_key'] = importer_api_key
importer_class = factories.DbImporterFactory(database)
importer = importer_class(**importer_parameters)
if database == 'mpds':
if number_species is None:
raise click.BadParameter('the number of species has to be defined for the {} database'.format(database))
query_parameters = {
'query': {},
'collection': 'structures'
}
if number_species == 1:
query_parameters['query']['classes'] = 'unary'
elif number_species == 2:
query_parameters['query']['classes'] = 'binary'
elif number_species == 3:
query_parameters['query']['classes'] = 'ternary'
elif number_species == 4:
query_parameters['query']['classes'] = 'quaternary'
elif number_species == 5:
query_parameters['query']['classes'] = 'quinary'
else:
# Limitation of MPDS: retrieve everything with more than 5 elements and filter on retrieved cifs. Since it
# is impossible to quickly determine the number of elements in a raw CIF file without parsing it, we cannot
# actually apply the filtering in the import here.
query_parameters['query']['classes'] = 'multinary'
else:
if number_species is not None:
query_parameters['number_of_elements'] = number_species
# Collect the dictionary of not None parameters passed to the launch script and print to screen
local_vars = locals()
for arg in inspect.getargspec(launch_cif_import.callback).args: # pylint: disable=deprecated-method
if arg in local_vars and local_vars[arg]:
            launch_parameters[arg] = local_vars[arg]
if not count_entries:
click.echo('=' * 80)
click.echo('Starting on {}'.format(datetime.utcnow().isoformat()))
        click.echo('Launch parameters: {}'.format(launch_parameters))
click.echo('Importer parameters: {}'.format(importer_parameters))
click.echo('Query parameters: {}'.format(query_parameters))
click.echo('-' * 80)
try:
query_results = importer.query(**query_parameters)
except Exception as exception: # pylint: disable=broad-except
echo.echo_critical('database query failed: {}'.format(exception))
if not count_entries:
builder = orm.QueryBuilder()
builder.append(orm.Group, filters={'label': group.label}, tag='group')
builder.append(orm.CifData, with_group='group', project='attributes.source.id')
existing_source_ids = [entry[0] for entry in builder.all()]
counter = 0
batch = []
for entry in query_results:
# Some query result generators fetch in batches, so we cannot simply return the length of the result set
if count_entries:
counter += 1
continue
source_id = entry.source['id']
if source_id in existing_source_ids:
if verbose:
echo_utc('Cif<{}> skipping: already present in group {}'.format(source_id, group.label))
continue
try:
cif = entry.get_cif_node()
except (AttributeError, UnicodeDecodeError, StarError, HTTPError) as exception:
if verbose:
name = exception.__class__.__name__
echo_utc('Cif<{}> skipping: encountered an error retrieving cif data: {}'.format(source_id, name))
else:
if skip_partial_occupancies and cif.has_partial_occupancies():
if verbose:
echo_utc('Cif<{}> skipping: contains partial occupancies'.format(source_id))
else:
if not dry_run:
batch.append(cif)
template = 'Cif<{}> adding: new CifData<{}> to group {}'
else:
template = 'Cif<{}> would have added: CifData<{}> to group {}'
echo_utc(template.format(source_id, cif.uuid, group.label))
counter += 1
if not dry_run and counter % batch_count == 0:
echo_utc('Storing batch of {} CifData nodes'.format(len(batch)))
nodes = [node.store() for node in batch]
group.add_nodes(nodes)
batch = []
if max_entries is not None and counter >= max_entries:
break
if count_entries:
click.echo('{}'.format(counter))
return
if not dry_run and batch:
echo_utc('Storing batch of {} CifData nodes'.format(len(batch)))
nodes = [node.store() for node in batch]
group.add_nodes(nodes)
click.echo('-' * 80)
click.echo('Stored {} new entries'.format(counter))
click.echo('Stopping on {}'.format(datetime.utcnow().isoformat()))
click.echo('=' * 80)
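# Example invocation (illustrative only; the exact entry point depends on how this command group is
# wired into the installed CLI, e.g. `aiida-codtools data cif import`, and the short option letters
# are taken from the declarations above):
#
#     aiida-codtools data cif import -d cod -M 100 -G cod-cif-raw --dry-run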
|
the-stack_0_20327 | # coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.media_info_type import MediaInfoType
import pprint
import six
class MediaInfoTypeResponse(object):
@poscheck_model
def __init__(self,
type_=None):
# type: (MediaInfoType) -> None
self._type = None
self.discriminator = None
if type_ is not None:
self.type = type_
@property
def openapi_types(self):
types = {
'type': 'MediaInfoType'
}
return types
@property
def attribute_map(self):
attributes = {
'type': 'type'
}
return attributes
@property
def type(self):
# type: () -> MediaInfoType
"""Gets the type of this MediaInfoTypeResponse.
The type of the media-info
:return: The type of this MediaInfoTypeResponse.
:rtype: MediaInfoType
"""
return self._type
@type.setter
def type(self, type_):
# type: (MediaInfoType) -> None
"""Sets the type of this MediaInfoTypeResponse.
The type of the media-info
:param type_: The type of this MediaInfoTypeResponse.
:type: MediaInfoType
"""
if type_ is not None:
if not isinstance(type_, MediaInfoType):
raise TypeError("Invalid type for `type`, type has to be `MediaInfoType`")
self._type = type_
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
if len(value) == 0:
continue
result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
elif hasattr(value, "to_dict"):
result[self.attribute_map.get(attr)] = value.to_dict()
elif isinstance(value, Enum):
result[self.attribute_map.get(attr)] = value.value
elif isinstance(value, dict):
result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
else:
result[self.attribute_map.get(attr)] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MediaInfoTypeResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
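# Minimal usage sketch (illustrative, not part of the generated SDK code above): the model only
# serialises attributes that were actually set, so a bare instance yields an empty dict.
if __name__ == '__main__':
    _response = MediaInfoTypeResponse()
    print(_response.to_dict())  # -> {}
    print(_response.to_str())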
|
the-stack_0_20328 | """INI files."""
from __future__ import annotations
from configparser import ConfigParser, DuplicateOptionError, Error, MissingSectionHeaderError, ParsingError
from io import StringIO
from typing import Any, Iterator
import dictdiffer
from configupdater import ConfigUpdater, Space
from nitpick.plugins import hookimpl
from nitpick.plugins.base import NitpickPlugin
from nitpick.plugins.info import FileInfo
from nitpick.violations import Fuss, ViolationEnum
COMMA_SEPARATED_VALUES = "comma_separated_values"
SECTION_SEPARATOR = "."
TOP_SECTION = "__TEMPORARY_TOP_SECTION__"
class Violations(ViolationEnum):
"""Violations for this plugin."""
MISSING_SECTIONS = (321, " has some missing sections. Use this:")
MISSING_VALUES_IN_LIST = (322, " has missing values in the {key!r} key. Include those values:")
OPTION_HAS_DIFFERENT_VALUE = (323, ": [{section}]{key} is {actual} but it should be like this:")
MISSING_OPTION = (324, ": section [{section}] has some missing key/value pairs. Use this:")
INVALID_COMMA_SEPARATED_VALUES_SECTION = (325, f": invalid sections on {COMMA_SEPARATED_VALUES}:")
PARSING_ERROR = (326, ": parsing error ({cls}): {msg}")
TOP_SECTION_HAS_DIFFERENT_VALUE = (327, ": {key} is {actual} but it should be:")
TOP_SECTION_MISSING_OPTION = (328, ": top section has missing options. Use this:")
class IniPlugin(NitpickPlugin):
"""Enforce configurations and autofix INI files.
Examples of ``.ini`` files handled by this plugin:
- `setup.cfg <https://docs.python.org/3/distutils/configfile.html>`_
- `.editorconfig <https://editorconfig.org/>`_
- `tox.ini <https://github.com/tox-dev/tox>`_
- `.pylintrc <https://pylint.readthedocs.io/en/latest/user_guide/run.html#command-line-options>`_
Style examples enforcing values on INI files: :gitref:`flake8 configuration
<src/nitpick/resources/python/flake8.toml>`.
"""
fixable = True
identify_tags = {"ini", "editorconfig"}
violation_base_code = 320
updater: ConfigUpdater
comma_separated_values: set[str]
def post_init(self):
"""Post initialization after the instance was created."""
self.updater = ConfigUpdater()
self.comma_separated_values = set(self.nitpick_file_dict.get(COMMA_SEPARATED_VALUES, []))
if not self.needs_top_section:
return
if all(isinstance(v, dict) for v in self.expected_config.values()):
return
new_config = dict({TOP_SECTION: {}})
for key, value in self.expected_config.items():
if isinstance(value, dict):
new_config[key] = value
continue
new_config[TOP_SECTION][key] = value
self.expected_config = new_config
@property
def needs_top_section(self) -> bool:
"""Return True if this .ini file needs a top section (e.g.: .editorconfig)."""
return "editorconfig" in self.info.tags
@property
def current_sections(self) -> set[str]:
"""Current sections of the .ini file, including updated sections."""
return set(self.updater.sections())
@property
def initial_contents(self) -> str:
"""Suggest the initial content for this missing file."""
return self.get_missing_output()
@property
def expected_sections(self) -> set[str]:
"""Expected sections (from the style config)."""
return set(self.expected_config.keys())
@property
def missing_sections(self) -> set[str]:
"""Missing sections."""
return self.expected_sections - self.current_sections
def write_file(self, file_exists: bool) -> Fuss | None:
"""Write the new file."""
try:
if self.needs_top_section:
self.file_path.write_text(self.contents_without_top_section(str(self.updater)))
return None
if file_exists:
self.updater.update_file()
else:
self.updater.write(self.file_path.open("w"))
except ParsingError as err:
return self.reporter.make_fuss(Violations.PARSING_ERROR, cls=err.__class__.__name__, msg=err)
return None
@staticmethod
def contents_without_top_section(multiline_text: str) -> str:
"""Remove the temporary top section from multiline text, and keep the newline at the end of the file."""
return "\n".join(line for line in multiline_text.splitlines() if TOP_SECTION not in line) + "\n"
def get_missing_output(self) -> str:
"""Get a missing output string example from the missing sections in an INI file."""
missing = self.missing_sections
if not missing:
return ""
parser = ConfigParser()
for section in sorted(missing, key=lambda s: "0" if s == TOP_SECTION else f"1{s}"):
expected_config: dict = self.expected_config[section]
if self.autofix:
if self.updater.last_block:
self.updater.last_block.add_after.space(1)
self.updater.add_section(section)
self.updater[section].update(expected_config)
self.dirty = True
parser[section] = expected_config
return self.contents_without_top_section(self.get_example_cfg(parser))
# TODO: refactor: convert the contents to dict (with IniConfig().sections?) and mimic other plugins doing dict diffs
def enforce_rules(self) -> Iterator[Fuss]:
"""Enforce rules on missing sections and missing key/value pairs in an INI file."""
try:
yield from self._read_file()
except Error:
return
yield from self.enforce_missing_sections()
csv_sections = {v.split(SECTION_SEPARATOR)[0] for v in self.comma_separated_values}
missing_csv = csv_sections.difference(self.current_sections)
if missing_csv:
yield self.reporter.make_fuss(
Violations.INVALID_COMMA_SEPARATED_VALUES_SECTION, ", ".join(sorted(missing_csv))
)
# Don't continue if the comma-separated values are invalid
return
for section in self.expected_sections.intersection(self.current_sections) - self.missing_sections:
yield from self.enforce_section(section)
def _read_file(self) -> Iterator[Fuss]:
"""Read the .ini file or special files like .editorconfig."""
parsing_err: Error | None = None
try:
self.updater.read(str(self.file_path))
except MissingSectionHeaderError as err:
if self.needs_top_section:
original_contents = self.file_path.read_text()
self.updater.read_string(f"[{TOP_SECTION}]\n{original_contents}")
return
# If this is not an .editorconfig file, report this as a regular parsing error
parsing_err = err
except DuplicateOptionError as err:
parsing_err = err
if not parsing_err:
return
# Don't change the file if there was a parsing error
self.autofix = False
yield self.reporter.make_fuss(Violations.PARSING_ERROR, cls=parsing_err.__class__.__name__, msg=parsing_err)
raise Error
def enforce_missing_sections(self) -> Iterator[Fuss]:
"""Enforce missing sections."""
missing = self.get_missing_output()
if missing:
yield self.reporter.make_fuss(Violations.MISSING_SECTIONS, missing, self.autofix)
def enforce_section(self, section: str) -> Iterator[Fuss]:
"""Enforce rules for a section."""
expected_dict = self.expected_config[section]
actual_dict = {k: v.value for k, v in self.updater[section].items()}
# TODO: refactor: add a class Ini(BaseDoc) and move this dictdiffer code there
for diff_type, key, values in dictdiffer.diff(actual_dict, expected_dict):
if diff_type == dictdiffer.CHANGE:
if f"{section}.{key}" in self.comma_separated_values:
yield from self.enforce_comma_separated_values(section, key, values[0], values[1])
else:
yield from self.compare_different_keys(section, key, values[0], values[1])
elif diff_type == dictdiffer.ADD:
yield from self.show_missing_keys(section, values)
def enforce_comma_separated_values(self, section, key, raw_actual: Any, raw_expected: Any) -> Iterator[Fuss]:
"""Enforce sections and keys with comma-separated values. The values might contain spaces."""
actual_set = {s.strip() for s in raw_actual.split(",")}
expected_set = {s.strip() for s in raw_expected.split(",")}
missing = expected_set - actual_set
if not missing:
return
joined_values = ",".join(sorted(missing))
value_to_append = f",{joined_values}"
if self.autofix:
self.updater[section][key].value += value_to_append
self.dirty = True
section_header = "" if section == TOP_SECTION else f"[{section}]\n"
# TODO: test: top section with separated values in https://github.com/andreoliwa/nitpick/issues/271
yield self.reporter.make_fuss(
Violations.MISSING_VALUES_IN_LIST,
f"{section_header}{key} = (...){value_to_append}",
key=key,
fixed=self.autofix,
)
def compare_different_keys(self, section, key, raw_actual: Any, raw_expected: Any) -> Iterator[Fuss]:
"""Compare different keys, with special treatment when they are lists or numeric."""
if isinstance(raw_actual, (int, float, bool)) or isinstance(raw_expected, (int, float, bool)):
# A boolean "True" or "true" has the same effect on ConfigParser files.
actual = str(raw_actual).lower()
expected = str(raw_expected).lower()
else:
actual = raw_actual
expected = raw_expected
if actual == expected:
return
if self.autofix:
self.updater[section][key].value = expected
self.dirty = True
if section == TOP_SECTION:
yield self.reporter.make_fuss(
Violations.TOP_SECTION_HAS_DIFFERENT_VALUE,
f"{key} = {raw_expected}",
key=key,
actual=raw_actual,
fixed=self.autofix,
)
else:
yield self.reporter.make_fuss(
Violations.OPTION_HAS_DIFFERENT_VALUE,
f"[{section}]\n{key} = {raw_expected}",
section=section,
key=key,
actual=raw_actual,
fixed=self.autofix,
)
def show_missing_keys(self, section: str, values: list[tuple[str, Any]]) -> Iterator[Fuss]:
"""Show the keys that are not present in a section."""
parser = ConfigParser()
missing_dict = dict(values)
parser[section] = missing_dict
output = self.get_example_cfg(parser)
self.add_options_before_space(section, missing_dict)
if section == TOP_SECTION:
yield self.reporter.make_fuss(
Violations.TOP_SECTION_MISSING_OPTION, self.contents_without_top_section(output), self.autofix
)
else:
yield self.reporter.make_fuss(Violations.MISSING_OPTION, output, self.autofix, section=section)
def add_options_before_space(self, section: str, options: dict) -> None:
"""Add new options before a blank line in the end of the section."""
if not self.autofix:
return
space_removed = False
while isinstance(self.updater[section].last_block, Space):
space_removed = True
self.updater[section].last_block.detach()
self.updater[section].update(options)
self.dirty = True
if space_removed:
self.updater[section].last_block.add_after.space(1)
@staticmethod
def get_example_cfg(parser: ConfigParser) -> str:
"""Print an example of a config parser in a string instead of a file."""
string_stream = StringIO()
parser.write(string_stream)
output = string_stream.getvalue().strip()
return output
@hookimpl
def plugin_class() -> type[NitpickPlugin]:
"""Handle INI files."""
return IniPlugin
@hookimpl
def can_handle(info: FileInfo) -> type[NitpickPlugin] | None:
"""Handle INI files."""
if IniPlugin.identify_tags & info.tags:
return IniPlugin
return None
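# Illustrative style snippet (TOML) of the kind this plugin enforces; the section and key names
# below are examples only, not taken from a real style file:
#
#     ["setup.cfg".flake8]
#     max-line-length = 120
#
#     [nitpick.files."setup.cfg"]
#     comma_separated_values = ["flake8.ignore"]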
|
the-stack_0_20329 | response = {
"result": {
"task": {
"id": "456",
"title": "Алм",
"description": "Курс на социально-ориентированный национальный проект станет частью наших традиций",
"descriptionInBbcode": "Y",
"declineReason": None,
"priority": "1",
"status": "2",
"notViewed": "N",
"statusComplete": "1",
"multitask": "N",
"stageId": "0",
"responsibleId": "1",
"responsibleName": "Korvin",
"responsibleLastName": "-Control",
"responsibleSecondName": None,
"responsibleLogin": "admin",
"responsibleWorkPosition": None,
"responsiblePhoto": None,
"dateStart": None,
"durationFact": None,
"timeEstimate": "0",
"timeSpentInLogs": None,
"replicate": "N",
"deadline": "2022-05-09T01:00:00+04:00",
"deadlineOrig": "2022-05-09 00:00:00",
"startDatePlan": None,
"endDatePlan": None,
"createdBy": "1",
"createdByName": "Korvin",
"createdByLastName": "-Control",
"createdBySecondName": None,
"createdByLogin": "admin",
"createdByWorkPosition": None,
"createdByPhoto": None,
"createdDate": "2022-06-08T21:54:00+04:00",
"changedBy": "1",
"changedDate": "2022-06-08T21:54:00+04:00",
"statusChangedBy": "1",
"statusChangedDate": "2022-06-08T21:54:00+04:00",
"closedBy": None,
"closedDate": None,
"activityDate": "2022-06-08T21:54:00+04:00",
"guid": "{bbe24f38-df7f-42d4-b172-9ebf432559ed}",
"xmlId": None,
"mark": None,
"allowChangeDeadline": "N",
"allowTimeTracking": "N",
"matchWorkTime": "N",
"taskControl": "N",
"addInReport": "N",
"groupId": "0",
"forumTopicId": None,
"parentId": None,
"commentsCount": None,
"serviceCommentsCount": None,
"forumId": None,
"siteId": "s1",
"subordinate": "N",
"exchangeModified": None,
"exchangeId": None,
"outlookVersion": "1",
"viewedDate": None,
"deadlineCounted": "0",
"forkedByTemplateId": None,
"favorite": "N",
"sorting": None,
"durationPlanSeconds": None,
"durationTypeAll": "days",
"durationPlan": None,
"durationType": "days",
"isMuted": "N",
"isPinned": "N",
"isPinnedInGroup": "N",
"ufCrmTask": False,
"ufTaskWebdavFiles": False,
"ufMailMessage": None,
"auditors": [],
"accomplices": [],
"tags": ["скпэ22-2"],
"checklist": [],
"files": [],
"dependsOn": [],
"group": [],
"creator": {
"id": "1",
"name": "Korvin -Control",
"link": "/company/personal/user/1/",
"icon": "/bitrix/images/tasks/default_avatar.png",
},
"responsible": {
"id": "1",
"name": "Korvin -Control",
"link": "/company/personal/user/1/",
"icon": "/bitrix/images/tasks/default_avatar.png",
},
"newCommentsCount": 0,
"action": {
"accept": False,
"decline": False,
"complete": True,
"approve": False,
"disapprove": False,
"start": True,
"pause": False,
"delegate": True,
"remove": True,
"edit": True,
"defer": True,
"renew": False,
"create": True,
"changeDeadline": True,
"checklistAddItems": True,
"addFavorite": True,
"deleteFavorite": False,
"rate": True,
"edit.originator": False,
"checklist.reorder": True,
"elapsedtime.add": True,
"dayplan.timer.toggle": False,
"edit.plan": True,
"checklist.add": True,
"favorite.add": True,
"favorite.delete": False,
},
}
},
"time": {
"start": 1654710840.774196,
"finish": 1654710840.852366,
"duration": 0.0781700611114502,
"processing": 0.06392693519592285,
"date_start": "2022-06-08T20:54:00+03:00",
"date_finish": "2022-06-08T20:54:00+03:00",
},
}
|
the-stack_0_20332 | __author__ = "Anne Evered"
# %% REQUIRED LIBRARIES
import pandas as pd
import os
from src.visualization.simulation_figures_shared_functions import data_preparation
from src.visualization.simulation_figure_plotly import create_simulation_figure_plotly
# This script uses the plotly versions of simulation output animation/figure
# to explore how we might visualize "replay loop" that Eden (edengh) was working on.
#
# This work is currently in exploratory stage.
# Specify file location and filename
file_location = os.path.join("..", "..", "data", "raw")
filename = "replay_loop.csv"
# Create dataframe and respecify some of the columns so do not have duplicate or ambiguous column names
replay_loop_df = pd.read_csv(os.path.abspath(os.path.join(file_location, filename)))
replay_loop_df.rename(
columns={
"temp_basal": "jaeb_temp_basal",
"suggested_temp_basal_value": "pyloopkit_temp_basal",
},
inplace=True,
)
replay_loop_df = data_preparation(replay_loop_df)
# Example where the plot is showing bg as first subplot and the sbr/bolus information for pyloopkit
# as the second subplot and the sbr/bolus information for pyloopkit as the third subplot
traces = [
{
0: ["bg_sensor"],
1: ["sbr", "jaeb_temp_basal_sbr_if_nan", "reported_bolus"],
2: ["sbr", "suggested_bolus", "pyloopkit_temp_basal_sbr_if_nan"],
}
]
create_simulation_figure_plotly(
files_need_loaded=False,
file_location=file_location,
file_names=[filename],
data_frames=[replay_loop_df],
traces=traces,
subplots=3,
time_range=(0, 24),
main_title="Replay Loop Animation Example",
subtitle="",
subplot_titles=[
"BG Values",
"Insulin Given (as shown in Jaeb Data)",
"Insulin Suggested (from PyLoopKit and Bolus Recommendation Tool)",
],
save_fig_path=os.path.join(
"..", "..", "reports", "figures", "replay_loop_animation_examples"
),
figure_name="plotly_simulation_figure",
analysis_name="replay_loop",
animate=True,
)
# Example where the plot is showing bg as first subplot and then sbr information for both pyloopkit
# and jaeb as the second and the bolus information for pyloopkit and jaeb as the third
traces = [
{
0: ["bg_sensor"],
1: ["sbr", "jaeb_temp_basal_sbr_if_nan", "pyloopkit_temp_basal_sbr_if_nan"],
2: ["reported_bolus", "suggested_bolus"],
}
]
create_simulation_figure_plotly(
files_need_loaded=False,
file_location=file_location,
file_names=[filename],
data_frames=[replay_loop_df],
traces=traces,
subplots=3,
time_range=(0, 24),
main_title="Replay Loop Animation Example",
subtitle="",
subplot_titles=[
"BG Values",
"Basal Insulin Given (Jaeb) vs. Suggested (PyLoopKit)",
"Bolus Reported (Jaeb) vs. Suggested (Bolus Recommendation Tool)",
],
save_fig_path=os.path.join(
"..", "..", "reports", "figures", "replay_loop_animation_examples"
),
figure_name="plotly_simulation_figure",
analysis_name="replay_loop",
animate=True,
)
|
the-stack_0_20334 | """
Support for the `MAF`_ multiple sequence alignment format used by `multiz`_.
.. _MAF: http://genome.ucsc.edu/FAQ/FAQformat.html#format5
.. _multiz: http://www.bx.psu.edu/miller_lab/
"""
import itertools
import os
from six import Iterator, StringIO
from bx import interval_index_file
from bx.align import *
from bx.misc.seekbzip2 import SeekableBzip2File
MAF_INVERSE_STATUS = 'V'
MAF_INSERT_STATUS = 'I'
MAF_CONTIG_STATUS = 'C'
MAF_CONTIG_NESTED_STATUS = 'c'
MAF_NEW_STATUS = 'N'
MAF_NEW_NESTED_STATUS = 'n'
MAF_MAYBE_NEW_STATUS = 'S'
MAF_MAYBE_NEW_NESTED_STATUS = 's'
MAF_MISSING_STATUS = 'M'
class MAFIndexedAccess( interval_index_file.AbstractIndexedAccess ):
"""
Indexed access to a MAF file.
"""
def read_at_current_offset( self, file, **kwargs ):
"""
Read the MAF block at the current position in `file` and return an
instance of `Alignment`.
"""
return read_next_maf( file, **kwargs )
class MAFMultiIndexedAccess( interval_index_file.AbstractMultiIndexedAccess ):
"""
Indexed access to multiple MAF files.
"""
indexed_access_class = MAFIndexedAccess
Indexed = MAFIndexedAccess
"""Deprecated: `MAFIndexedAccess` is also available under the name `Indexed`."""
MultiIndexed = MAFMultiIndexedAccess
"""Deprecated: `MAFMultiIndexedAccess` is also available under the name `MultiIndexed`."""
class Reader( Iterator ):
"""
Iterate over all maf blocks in a file in order
"""
def __init__( self, file, **kwargs ):
self.file = file
self.maf_kwargs = kwargs
# Read and verify maf header, store any attributes
fields = self.file.readline().split()
if fields[0] != '##maf': raise Exception("File does not have MAF header")
self.attributes = parse_attributes( fields[1:] )
def __next__( self ):
return read_next_maf( self.file, **self.maf_kwargs )
def __iter__( self ):
return ReaderIter( self )
def close( self ):
self.file.close()
class ReaderIter( Iterator ):
"""
Adapts a `Reader` to the iterator protocol.
"""
def __init__( self, reader ):
self.reader = reader
def __iter__( self ):
return self
def __next__( self ):
v = next(self.reader)
if not v: raise StopIteration
return v
class Writer( object ):
def __init__( self, file, attributes={} ):
self.file = file
        # Write header, Webb's maf code wants version first, we accommodate
if 'version' not in attributes: attributes['version'] = 1
self.file.write( "##maf version=%s" % attributes['version'] )
for key in attributes:
if key == 'version': continue
            self.file.write( " %s=%s" % ( key, attributes[key] ) )
self.file.write( "\n" )
def write( self, alignment ):
self.file.write( "a score=" + str( alignment.score ) )
for key in alignment.attributes:
self.file.write( " %s=%s" % ( key, alignment.attributes[key] ) )
self.file.write( "\n" )
# Components
rows = []
for c in alignment.components:
# "Empty component" generates an 'e' row
if c.empty:
rows.append( ( "e", c.src, str( c.start ), str( c.size ), c.strand, str( c.src_size ), c.synteny_empty ) )
continue
# Regular component
rows.append( ( "s", c.src, str( c.start ), str( c.size ), c.strand, str( c.src_size ), c.text ) )
# If component has quality, write a q row
if c.quality is not None:
rows.append( ( "q", c.src, "", "", "", "", c.quality ) )
# If component has synteny follow up with an 'i' row
if c.synteny_left and c.synteny_right:
rows.append( ( "i", c.src, "", "", "", "", " ".join( map( str, c.synteny_left + c.synteny_right ) ) ) )
self.file.write( format_tabular( rows, "llrrrrl" ) )
self.file.write( "\n" )
def close( self ):
self.file.close()
# ---- Helper methods -------------------------------------------------------
def from_string( string, **kwargs ):
return read_next_maf( StringIO( string ), **kwargs )
def read_next_maf( file, species_to_lengths=None, parse_e_rows=False ):
"""
Read the next MAF block from `file` and return as an `Alignment`
    instance. If `parse_e_rows` is true, empty components will be created
when e rows are encountered.
"""
alignment = Alignment(species_to_lengths=species_to_lengths)
# Attributes line
line = readline( file, skip_blank=True )
if not line: return None
fields = line.split()
if fields[0] != 'a': raise Exception("Expected 'a ...' line")
alignment.attributes = parse_attributes( fields[1:] )
if 'score' in alignment.attributes:
alignment.score = alignment.attributes['score']
del alignment.attributes['score']
else:
alignment.score = 0
# Sequence lines
last_component = None
while 1:
line = readline( file )
# EOF or Blank line terminates alignment components
        if not line or line.isspace(): break
# Parse row
fields = line.split()
if fields[0] == 's':
# An 's' row contains sequence for a component
component = Component()
component.src = fields[1]
component.start = int( fields[2] )
component.size = int( fields[3] )
component.strand = fields[4]
component.src_size = int( fields[5] )
if len(fields) > 6: component.text = fields[6].strip()
# Add to set
alignment.add_component( component )
last_component = component
elif fields[0] == 'e':
# An 'e' row, when no bases align for a given species this tells
# us something about the synteny
if parse_e_rows:
component = Component()
component.empty = True
component.src = fields[1]
component.start = int( fields[2] )
component.size = int( fields[3] )
component.strand = fields[4]
component.src_size = int( fields[5] )
component.text = None
synteny = fields[6].strip()
assert len( synteny ) == 1, \
"Synteny status in 'e' rows should be denoted with a single character code"
component.synteny_empty = synteny
alignment.add_component( component )
last_component = component
elif fields[0] == 'i':
# An 'i' row, indicates left and right synteny status for the
# previous component, we hope ;)
assert fields[1] == last_component.src, "'i' row does not follow matching 's' row"
last_component.synteny_left = ( fields[2], int( fields[3] ) )
last_component.synteny_right = ( fields[4], int( fields[5] ) )
elif fields[0] == 'q':
assert fields[1] == last_component.src, "'q' row does not follow matching 's' row"
# TODO: Should convert this to an integer array?
last_component.quality = fields[2]
return alignment
def readline( file, skip_blank=False ):
"""Read a line from provided file, skipping any blank or comment lines"""
while 1:
line = file.readline()
#print "every line: %r" % line
if not line: return None
if line[0] != '#' and not ( skip_blank and line.isspace() ):
return line
def parse_attributes( fields ):
"""Parse list of key=value strings into a dict"""
attributes = {}
for field in fields:
pair = field.split( '=' )
attributes[ pair[0] ] = pair[1]
return attributes
def format_tabular( rows, align=None ):
if len( rows ) == 0: return ""
lengths = [ len( col ) for col in rows[ 0 ] ]
for row in rows[1:]:
for i in range( 0, len( row ) ):
lengths[ i ] = max( lengths[ i ], len( row[ i ] ) )
rval = ""
for row in rows:
for i in range( 0, len( row ) ):
if align and align[ i ] == "l":
rval += row[ i ].ljust( lengths[ i ] )
else:
rval += row[ i ].rjust( lengths[ i ] )
rval += " "
rval += "\n"
return rval
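# Minimal round-trip sketch (illustrative; it only runs when the module is executed directly, and
# the alignment text below is made-up demo data rather than a real MAF extract):
if __name__ == "__main__":
    import sys
    _example = (
        "a score=23262.0\n"
        "s hg18.chr7    1000 12 + 158545518 ACTG-ACTGACTG\n"
        "s panTro1.chr6 2000 12 + 161576975 ACTGAACTG-CTG\n"
        "\n"
    )
    _block = from_string(_example)
    _writer = Writer(sys.stdout)
    _writer.write(_block)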
|
the-stack_0_20335 |
# scenario name for log file
scenario_name = "custom_geometry"
# timing parameters
# -----------------
end_time = 100.0 # [ms] end time of the simulation
stimulation_frequency = 100*1e-3 # [ms^-1] sampling frequency of stimuli in firing_times_file, in stimulations per ms; the number before the 1e-3 factor is in Hz.
dt_3D = 1e-1 # [ms] time step width of coupling, when 3D should be performed, also sampling time of monopolar EMG
output_timestep = 1 # [ms] timestep for output surface EMG, 0.5
output_timestep_fibers = 1 # [ms] timestep for fiber output, 0.5
output_timestep_big = 1 # [ms] timestep for output files of 3D intramuscular EMG data
# custom 3D mesh
# ----------------------
import numpy as np
custom_meshes = {}
node_positions = []
# number of nodes of the 3D mesh
n_nodes_x = 6
n_nodes_y = 6
n_nodes_z = 100
# belly shape function of the muscle
def belly(r,z):
return r*(0.5 + (np.sin(z * 2*np.pi - 0.5*np.pi)+1)/2)
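# illustrative sanity check of the shape (not in the original settings file): belly(r, 0) and
# belly(r, 1) evaluate to 0.5*r at the muscle ends, while belly(r, 0.5) gives 1.5*r at mid-belly,
# so the radius bulges in the middle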
# loop over points of a structured grid, x,y,z coordinates are i,j,k with size n_nodes_x * n_nodes_y * n_nodes_z
for k in range(n_nodes_z):
for j in range(n_nodes_y):
for i in range(n_nodes_x):
fiber_no = j*n_nodes_x + i
x = i-(n_nodes_x-1)/2
y = j-(n_nodes_y-1)/2
# get polar coordinates of current point
phi = np.arctan2(y,x)
r = np.linalg.norm(np.array((x,y)))
# get belly shape of the muscle
r_new = belly(r,k/n_nodes_z)
# determine new position
x = r_new*np.cos(phi)
y = r_new*np.sin(phi)
z = k/n_nodes_z * 10 # 10 cm in z direction
node_positions.append([x,y,z])
print("n nodePositions: {}".format(len(node_positions)))
print("nElements: {}".format((n_nodes_x-1) * (n_nodes_y-1) * (n_nodes_z-1)))
custom_meshes["3Dmesh"] = {
"nElements": [(n_nodes_x-1), (n_nodes_y-1), (n_nodes_z-1)],
"nodePositions": node_positions,
"inputMeshIsGlobal": True,
"setHermiteDerivatives": False,
"logKey": "3Dmesh",
"nRanks": [1,1,1],
}
# custom 1D fiber meshes
# ----------------------
# number of fibers in the 2D fiber grid; only those fibers that are actually inside the muscle are generated
n_custom_fibers_x = 8
n_custom_fibers_y = 8
n_nodes_per_fiber = 1000
alpha = 0.4*np.pi # angles to skew the fibers
beta = 0.2*np.pi
n_custom_fibers = 0 # counter for actually generated fibers
# loop over fibers in a 2D grid
for j in range(n_custom_fibers_y):
for i in range(n_custom_fibers_x):
x_center = i-(n_custom_fibers_y-1)/2
y_center = j-(n_custom_fibers_x-1)/2
# loop over node positions of the current fiber
node_positions = []
for k in range(-n_nodes_per_fiber//2,n_nodes_per_fiber//2):
# determine position
x = x_center + k/n_nodes_per_fiber * np.tan(alpha)
y = y_center + k/n_nodes_per_fiber * np.tan(beta)
z = (k/n_nodes_per_fiber + 0.5) * 10 # 10 cm in z direction
# determine if fiber point is inside muscle volume
r_base = np.linalg.norm(np.array((i-(n_nodes_x-1)/2,j-(n_nodes_y-1)/2)))
r = np.linalg.norm(np.array((x,y)))
# if point is outside muscle, by using the belly heuristic
if r > belly(r_base,k/n_nodes_per_fiber + 0.5):
if len(node_positions) == 0: # if fiber inside the muscle has not yet started, continue, waiting for the first point inside the muscle
continue
else: # if there were already points inside the muscle, finish the current fiber
break
# add position to list of node positions for current fiber
node_positions.append([x,y,z])
# if there were node positions inside the muscle, add fiber to dict of fibers
if len(node_positions) == 0:
print("Fiber ({},{}) is completely outside the 3D mesh!".format(i,j))
else:
custom_meshes["MeshFiber_{}".format(n_custom_fibers)] = {
"nElements": len(node_positions)-1,
"nodePositions": node_positions,
"inputMeshIsGlobal": True,
"setHermiteDerivatives": False,
"nRanks": [1],
}
n_custom_fibers += 1
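# illustrative convenience output (not part of the original settings file): report how many of the
# candidate fibers ended up with at least one node inside the muscle volume
print("n custom fibers inside the muscle: {} of {} candidates".format(
    n_custom_fibers, n_custom_fibers_x*n_custom_fibers_y))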
# other options
paraview_output = True
adios_output = False
exfile_output = False
python_output = False
enable_surface_emg = True
disable_firing_output = False
#fiber_file = "../../../input/left_biceps_brachii_13x13fibers.bin"
fiber_file = "../../../input/left_biceps_brachii_7x7fibers.bin"
firing_times_file = "../../../input/MU_firing_times_real.txt"
fiber_distribution_file = "../../../input/MU_fibre_distribution_10MUs.txt"
|
the-stack_0_20337 | import requests
import json
from testing import gen_points
import asyncio
import functools
import random
import time
import math
import re
import qpack
from testing import Client
from testing import default_test_setup
from testing import gen_data
from testing import gen_points
from testing import gen_series
from testing import InsertError
from testing import PoolError
from testing import QueryError
from testing import run_test
from testing import Series
from testing import Server
from testing import ServerError
from testing import SiriDB
from testing import TestBase
from testing import UserAuthError
from testing import parse_args
TIME_PRECISION = 's'
class TestHTTPAPI(TestBase):
title = 'Test HTTP API requests'
@default_test_setup(3, time_precision=TIME_PRECISION)
async def run(self):
await self.client0.connect()
x = requests.get(
f'http://localhost:9020/get-version', auth=('sa', 'siri'))
self.assertEqual(x.status_code, 200)
v = x.json()
self.assertTrue(isinstance(v, list))
self.assertTrue(isinstance(v[0], str))
x = requests.post(
f'http://localhost:9020/insert/dbtest',
auth=('iris', 'siri'),
headers={'Content-Type': 'application/json'})
self.assertEqual(x.status_code, 400)
series_float = gen_points(
tp=float, n=10000, time_precision=TIME_PRECISION, ts_gap='5m')
series_int = gen_points(
tp=int, n=10000, time_precision=TIME_PRECISION, ts_gap='5m')
data = {
'my_float': series_float,
'my_int': series_int
}
x = requests.post(
f'http://localhost:9020/insert/dbtest',
data=json.dumps(data),
auth=('iris', 'siri'),
headers={'Content-Type': 'application/json'}
)
self.assertEqual(x.status_code, 200)
self.assertDictEqual(x.json(), {
'success_msg': 'Successfully inserted 20000 point(s).'})
data = {
'dbname': 'dbtest',
'host': 'localhost',
'port': 9000,
'username': 'iris',
'password': 'siri'
}
x = requests.post(
f'http://localhost:9021/new-pool',
data=json.dumps(data),
auth=('sa', 'siri'),
headers={'Content-Type': 'application/json'})
self.assertEqual(x.status_code, 200)
self.assertEqual(x.json(), 'OK')
self.db.servers.append(self.server1)
await self.assertIsRunning(self.db, self.client0, timeout=30)
data = {'data': [[1579521271, 10], [1579521573, 20]]}
x = requests.post(
f'http://localhost:9020/insert/dbtest',
json=data,
auth=('iris', 'siri'))
self.assertEqual(x.status_code, 200)
self.assertDictEqual(x.json(), {
'success_msg': 'Successfully inserted 2 point(s).'})
x = requests.post(
f'http://localhost:9020/query/dbtest',
json={'q': 'select * from "data"'},
auth=('iris', 'siri'))
self.assertEqual(x.status_code, 200)
self.assertEqual(x.json(), data)
x = requests.post(
f'http://localhost:9020/query/dbtest',
json={'q': 'select * from "data"', 't': 'ms'},
auth=('iris', 'siri'))
data = {
'data': [[p[0] * 1000, p[1]] for p in data['data']]
}
self.assertEqual(x.status_code, 200)
self.assertEqual(x.json(), data)
x = requests.post(
f'http://localhost:9020/query/dbtest',
data=qpack.packb({
'q': 'select sum(1579600000) from "data"',
't': 'ms'}),
headers={'Content-Type': 'application/qpack'},
auth=('iris', 'siri'))
self.assertEqual(x.status_code, 200)
self.assertEqual(
qpack.unpackb(x.content, decode='utf8'),
{'data': [[1579600000000, 30]]})
x = requests.post(
f'http://localhost:9021/new-account',
json={'account': 't', 'password': ''},
auth=('sa', 'siri'))
self.assertEqual(x.status_code, 400)
self.assertEqual(x.json(), {
'error_msg':
'service account name should have at least 2 characters'})
x = requests.post(
f'http://localhost:9021/new-account',
json={'account': 'tt', 'password': 'pass'},
auth=('sa', 'siri'))
self.assertEqual(x.status_code, 200)
data = {
'dbname': 'dbtest',
'host': 'localhost',
'port': 1234,
'pool': 0,
'username': 'iris',
'password': 'siri'
}
auth = ('tt', 'pass')
x = requests.post(
f'http://localhost:9021/new-replica', json=data, auth=auth)
self.assertEqual(x.status_code, 400)
self.assertEqual(x.json(), {
'error_msg': "database name already exists: 'dbtest'"})
x = requests.post(
f'http://localhost:9022/new-replica', json=data, auth=auth)
self.assertEqual(x.status_code, 401)
auth = ('sa', 'siri')
x = requests.post(
f'http://localhost:9022/new-replica', json=data, auth=auth)
self.assertEqual(x.status_code, 400)
self.assertEqual(x.json(), {
'error_msg':
"connecting to server 'localhost:1234' failed with error: "
"connection refused"})
data['port'] = 9000
x = requests.post(
f'http://localhost:9022/new-replica', json=data, auth=auth)
self.assertEqual(x.status_code, 200)
self.assertEqual(x.json(), 'OK')
self.db.servers.append(self.server2)
await self.assertIsRunning(self.db, self.client0, timeout=30)
x = requests.get(
f'http://localhost:9022/get-databases', auth=auth)
self.assertEqual(x.status_code, 200)
self.assertEqual(x.json(), ['dbtest'])
self.client0.close()
if __name__ == '__main__':
parse_args()
run_test(TestHTTPAPI())
|
the-stack_0_20341 | import ctypes
from PIL import Image, ImageDraw, ImageFont
from pi3d.constants import *
from pi3d.Texture import Texture
class Ttffont(Texture):
"""Loads a Ttf font from disk and creates a Texture and lookup table for
the String class to write with"""
def __init__(self, font, col="#ffffff", fontsize=48, imagesize=512):
"""Arguments:
*font*
file path/name to a ttf file
Keyword arguments:
*col*
colour in standard hex format #RRGGBB
*fontsize*
point size for drawing the letters on the internal Texture
*imagesize*
pixels square, needs to be bigger for large point size
"""
super(Ttffont, self).__init__(font)
self.font = font
imgfont = ImageFont.truetype(font, fontsize)
self.im = Image.new("RGBA", (imagesize, imagesize))
self.alpha = True
self.ix, self.iy = imagesize, imagesize
self.ch = []
draw = ImageDraw.Draw(self.im)
curX = 0.0
curY = 0.0
characters = []
maxRowHeight = 0.0
for i in range(32, 128):
ch = chr(i)
chwidth, chheight = imgfont.getsize(ch)
if curX + chwidth*1.1 >= imagesize:
curX = 0.0
curY = curY + maxRowHeight
maxRowHeight = 0.0
if chheight > maxRowHeight:
maxRowHeight = chheight
draw.text((curX, curY), ch, font = imgfont, fill = col)
x = (curX + 0.0) / self.ix
y = (curY + chheight + 0.0) / self.iy
tw = (chwidth + 0.0) / self.ix
th = (chheight + 0.0) / self.iy
w = imagesize
h = imagesize
self.ch.append((chwidth, chheight,
[(x + tw, y - th), (x, y - th), (x, y), (x + tw, y)],
[(chwidth, 0, 0), (0, 0, 0), (0, -chheight, 0), (chwidth, -chheight, 0)]))
curX = curX + chwidth*1.1 # to avoid overlapping corners of italics
RGBs = 'RGBA' if self.alpha else 'RGB'
self.image = self.im.convert(RGBs).tostring('raw', RGBs)
self._tex = ctypes.c_int()
def _load_disk(self):
"""
we need to stop the normal file loading by overriding this method
"""
|
the-stack_0_20342 | from mooquant import plotter, strategy
from mooquant.analyzer import drawdown, returns, sharpe, trades
from mooquant.broker.backtesting import TradePercentage
from mooquant.broker.fillstrategy import DefaultStrategy
from mooquant.technical import cross, ma
from mooquant.tools import tushare
class thrSMA(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, short_l, mid_l, long_l, up_cum):
strategy.BacktestingStrategy.__init__(self, feed)
self.getBroker().setFillStrategy(DefaultStrategy(None))
self.getBroker().setCommission(TradePercentage(0.001))
self.__position = None
self.__instrument = instrument
self.__prices = feed[instrument].getPriceDataSeries()
self.__malength1 = int(short_l)
self.__malength2 = int(mid_l)
self.__malength3 = int(long_l)
self.__circ = int(up_cum)
self.__ma1 = ma.SMA(self.__prices, self.__malength1)
self.__ma2 = ma.SMA(self.__prices, self.__malength2)
self.__ma3 = ma.SMA(self.__prices, self.__malength3)
self.__datetime = feed[instrument].getDateTimes()
self.__open = feed[instrument].getOpenDataSeries()
self.__high = feed[instrument].getHighDataSeries()
self.__low = feed[instrument].getLowDataSeries()
self.__close = feed[instrument].getCloseDataSeries()
def getPrice(self):
return self.__prices
def getSMA(self):
return self.__ma1, self.__ma2, self.__ma3
def onEnterCanceled(self, position):
self.__position = None
def onEnterOK(self):
pass
def onExitOk(self, position):
self.__position = None
# self.info("long close")
def onExitCanceled(self, position):
self.__position.exitMarket()
def buyCon1(self):
if cross.cross_above(self.__ma1, self.__ma2) > 0:
return True
def buyCon2(self):
m1 = 0
m2 = 0
for i in range(self.__circ):
if self.__ma1[-i - 1] > self.__ma3[-i - 1]:
m1 += 1
if self.__ma2[-i - 1] > self.__ma3[-i - 1]:
m2 += 1
if m1 >= self.__circ and m2 >= self.__circ:
return True
def sellCon1(self):
if cross.cross_below(self.__ma1, self.__ma2) > 0:
return True
def onBars(self, bars):
# If a position was not opened, check if we should enter a long
# position.
self.dayInfo(bars[self.__instrument])
if self.__ma2[-1] is None:
return
if self.__position is not None:
if not self.__position.exitActive() and cross.cross_below(
self.__ma1, self.__ma2) > 0:
self.__position.exitMarket()
# self.info("sell %s" % (bars.getDateTime()))
if self.__position is None:
if self.buyCon1() and self.buyCon2():
shares = int(self.getBroker().getCash() * 0.2 /
bars[self.__instrument].getPrice())
self.__position = self.enterLong(self.__instrument, shares)
def dayInfo(self, bar):
try:
self.__openD[-1]
except AttributeError:
self.__openD = []
self.__highD = []
self.__lowD = []
self.__closeD = []
self.__upper_limit = []
self.__lower_limit = []
if len(self.__datetime) < 2:
self.__openD.append(bar.getOpen())
self.__highD.append(self.__high[-1])
self.__lowD.append(self.__low[-1])
self.__closeD.append(self.__close[-1])
return
# if another day
if self.__datetime[-1].date() != self.__datetime[-2].date():
self.__openD.append(bar.getOpen())
self.__highD.append(self.__high[-1])
self.__lowD.append(self.__low[-1])
self.__closeD.append(self.__close[-1])
self.__upper_limit.append(
round(round(self.__closeD[-2] * 1.1 * 1000) / 10) / 100)
self.__lower_limit.append(
round(round(self.__closeD[-2] * 0.9 * 1000) / 10) / 100)
print(self.__datetime[-1].date(),
self.__datetime[-2].date(), self.__openD[-1])
elif self.__datetime[-1].date() == self.__datetime[-2].date():
if self.__high[-1] > self.__highD[-1]:
self.__highD[-1] = self.__high[-1]
if self.__low[-1] < self.__lowD[-1]:
self.__lowD[-1] = self.__low[-1]
self.__closeD[-1] = self.__close[-1]
def main():
strat = thrSMA
instrument = '600288'
paras = [2, 20, 60, 10]
feeds = tushare.build_feed([instrument], 2016, 2017, "histdata/tushare")
strat = strat(feeds, instrument, *paras)
retAnalyzer = returns.Returns()
strat.attachAnalyzer(retAnalyzer)
sharpeRatioAnalyzer = sharpe.SharpeRatio()
strat.attachAnalyzer(sharpeRatioAnalyzer)
drawDownAnalyzer = drawdown.DrawDown()
strat.attachAnalyzer(drawDownAnalyzer)
tradesAnalyzer = trades.Trades()
strat.attachAnalyzer(tradesAnalyzer)
plter = plotter.StrategyPlotter(strat, True, True, True)
strat.run()
plter.plot()
    # Sharpe ratio
sharp = sharpeRatioAnalyzer.getSharpeRatio(0.05)
    # Maximum drawdown
maxdd = drawDownAnalyzer.getMaxDrawDown()
    # Cumulative return
return_ = retAnalyzer.getCumulativeReturns()[-1]
    # Return curve
return_list = []
for item in retAnalyzer.getCumulativeReturns():
return_list.append(item)
if __name__ == "__main__":
main()
|
the-stack_0_20344 | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from types import MethodType
from typing import Dict, List, Union
import numpy as np
import tensorflow as tf
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import HeadParameters
from rl_coach.architectures.middleware_parameters import MiddlewareParameters
from rl_coach.architectures.tensorflow_components.architecture import TensorFlowArchitecture
from rl_coach.architectures.tensorflow_components import utils
from rl_coach.base_parameters import AgentParameters, Device, DeviceType, EmbeddingMergerType
from rl_coach.core_types import PredictionType
from rl_coach.logger import screen
from rl_coach.spaces import SpacesDefinition, PlanarMapsObservationSpace, TensorObservationSpace
from rl_coach.utils import get_all_subclasses, dynamic_import_and_instantiate_module_from_params, indent_string
class GeneralTensorFlowNetwork(TensorFlowArchitecture):
"""
A generalized version of all possible networks implemented using tensorflow.
"""
# dictionary of variable-scope name to variable-scope object to prevent tensorflow from
# creating a new auxiliary variable scope even when name is properly specified
variable_scopes_dict = dict()
@staticmethod
def construct(variable_scope: str, devices: List[str], *args, **kwargs) -> 'GeneralTensorFlowNetwork':
"""
Construct a network class using the provided variable scope and on requested devices
:param variable_scope: string specifying variable scope under which to create network variables
:param devices: list of devices (can be list of Device objects, or string for TF distributed)
:param args: all other arguments for class initializer
:param kwargs: all other keyword arguments for class initializer
:return: a GeneralTensorFlowNetwork object
"""
if len(devices) > 1:
screen.warning("Tensorflow implementation only support a single device. Using {}".format(devices[0]))
def construct_on_device():
with tf.device(GeneralTensorFlowNetwork._tf_device(devices[0])):
return GeneralTensorFlowNetwork(*args, **kwargs)
# If variable_scope is in our dictionary, then this is not the first time that this variable_scope
# is being used with construct(). So to avoid TF adding an incrementing number to the end of the
# variable_scope to uniquify it, we have to both pass the previous variable_scope object to the new
# variable_scope() call and also recover the name space using name_scope
if variable_scope in GeneralTensorFlowNetwork.variable_scopes_dict:
variable_scope = GeneralTensorFlowNetwork.variable_scopes_dict[variable_scope]
with tf.variable_scope(variable_scope, auxiliary_name_scope=False) as vs:
with tf.name_scope(vs.original_name_scope):
return construct_on_device()
else:
with tf.variable_scope(variable_scope, auxiliary_name_scope=True) as vs:
# Add variable_scope object to dictionary for next call to construct
GeneralTensorFlowNetwork.variable_scopes_dict[variable_scope] = vs
return construct_on_device()
@staticmethod
def _tf_device(device: Union[str, MethodType, Device]) -> str:
"""
Convert device to tensorflow-specific device representation
:param device: either a specific string or method (used in distributed mode) which is returned without
any change or a Device type, which will be converted to a string
:return: tensorflow-specific string for device
"""
if isinstance(device, str) or isinstance(device, MethodType):
return device
elif isinstance(device, Device):
if device.device_type == DeviceType.CPU:
return "/cpu:0"
elif device.device_type == DeviceType.GPU:
return "/device:GPU:{}".format(device.index)
else:
raise ValueError("Invalid device_type: {}".format(device.device_type))
else:
raise ValueError("Invalid device instance type: {}".format(type(device)))
def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, name: str,
global_network=None, network_is_local: bool=True, network_is_trainable: bool=False):
"""
:param agent_parameters: the agent parameters
:param spaces: the spaces definition of the agent
:param name: the name of the network
:param global_network: the global network replica that is shared between all the workers
:param network_is_local: is the network global (shared between workers) or local (dedicated to the worker)
:param network_is_trainable: is the network trainable (we can apply gradients on it)
"""
self.global_network = global_network
self.network_is_local = network_is_local
self.network_wrapper_name = name.split('/')[0]
self.network_parameters = agent_parameters.network_wrappers[self.network_wrapper_name]
self.num_heads_per_network = 1 if self.network_parameters.use_separate_networks_per_head else \
len(self.network_parameters.heads_parameters)
self.num_networks = 1 if not self.network_parameters.use_separate_networks_per_head else \
len(self.network_parameters.heads_parameters)
self.gradients_from_head_rescalers = []
self.gradients_from_head_rescalers_placeholders = []
self.update_head_rescaler_value_ops = []
self.adaptive_learning_rate_scheme = None
self.current_learning_rate = None
# init network modules containers
self.input_embedders = []
self.output_heads = []
super().__init__(agent_parameters, spaces, name, global_network,
network_is_local, network_is_trainable)
self.available_return_types = self._available_return_types()
self.is_training = None
def _available_return_types(self):
ret_dict = {cls: [] for cls in get_all_subclasses(PredictionType)}
components = self.input_embedders + [self.middleware] + self.output_heads
for component in components:
if not hasattr(component, 'return_type'):
raise ValueError((
"{} has no return_type attribute. Without this, it is "
"unclear how this component should be used."
).format(component))
if component.return_type is not None:
ret_dict[component.return_type].append(component)
return ret_dict
def predict_with_prediction_type(self, states: Dict[str, np.ndarray],
prediction_type: PredictionType) -> Dict[str, np.ndarray]:
"""
Search for a component[s] which has a return_type set to the to the requested PredictionType, and get
predictions for it.
:param states: The input states to the network.
:param prediction_type: The requested PredictionType to look for in the network components
:return: A dictionary with predictions for all components matching the requested prediction type
"""
ret_dict = {}
for component in self.available_return_types[prediction_type]:
ret_dict[component] = self.predict(inputs=states, outputs=component.output)
return ret_dict
def get_input_embedder(self, input_name: str, embedder_params: InputEmbedderParameters):
"""
Given an input embedder parameters class, creates the input embedder and returns it
:param input_name: the name of the input to the embedder (used for retrieving the shape). The input should
be a value within the state or the action.
:param embedder_params: the parameters of the class of the embedder
:return: the embedder instance
"""
allowed_inputs = copy.copy(self.spaces.state.sub_spaces)
allowed_inputs["action"] = copy.copy(self.spaces.action)
allowed_inputs["goal"] = copy.copy(self.spaces.goal)
if input_name not in allowed_inputs.keys():
raise ValueError("The key for the input embedder ({}) must match one of the following keys: {}"
.format(input_name, allowed_inputs.keys()))
emb_type = "vector"
if isinstance(allowed_inputs[input_name], TensorObservationSpace):
emb_type = "tensor"
elif isinstance(allowed_inputs[input_name], PlanarMapsObservationSpace):
emb_type = "image"
embedder_path = embedder_params.path(emb_type)
embedder_params_copy = copy.copy(embedder_params)
embedder_params_copy.activation_function = utils.get_activation_function(embedder_params.activation_function)
embedder_params_copy.input_rescaling = embedder_params_copy.input_rescaling[emb_type]
embedder_params_copy.input_offset = embedder_params_copy.input_offset[emb_type]
embedder_params_copy.name = input_name
module = dynamic_import_and_instantiate_module_from_params(embedder_params_copy,
path=embedder_path,
positional_args=[allowed_inputs[input_name].shape])
return module
def get_middleware(self, middleware_params: MiddlewareParameters):
"""
Given a middleware type, creates the middleware and returns it
        :param middleware_params: the parameters of the middleware class
:return: the middleware instance
"""
mod_name = middleware_params.parameterized_class_name
middleware_path = middleware_params.path
middleware_params_copy = copy.copy(middleware_params)
middleware_params_copy.activation_function = utils.get_activation_function(middleware_params.activation_function)
module = dynamic_import_and_instantiate_module_from_params(middleware_params_copy, path=middleware_path)
return module
def get_output_head(self, head_params: HeadParameters, head_idx: int):
"""
Given a head type, creates the head and returns it
:param head_params: the parameters of the head to create
:param head_idx: the head index
:return: the head
"""
mod_name = head_params.parameterized_class_name
head_path = head_params.path
head_params_copy = copy.copy(head_params)
head_params_copy.activation_function = utils.get_activation_function(head_params_copy.activation_function)
return dynamic_import_and_instantiate_module_from_params(head_params_copy, path=head_path, extra_kwargs={
'agent_parameters': self.ap, 'spaces': self.spaces, 'network_name': self.network_wrapper_name,
'head_idx': head_idx, 'is_local': self.network_is_local})
def get_model(self):
# validate the configuration
if len(self.network_parameters.input_embedders_parameters) == 0:
raise ValueError("At least one input type should be defined")
if len(self.network_parameters.heads_parameters) == 0:
raise ValueError("At least one output type should be defined")
if self.network_parameters.middleware_parameters is None:
raise ValueError("Exactly one middleware type should be defined")
# ops for defining the training / testing phase
self.is_training = tf.Variable(False, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
self.is_training_placeholder = tf.placeholder("bool")
self.assign_is_training = tf.assign(self.is_training, self.is_training_placeholder)
for network_idx in range(self.num_networks):
with tf.variable_scope('network_{}'.format(network_idx)):
####################
# Input Embeddings #
####################
state_embedding = []
for input_name in sorted(self.network_parameters.input_embedders_parameters):
input_type = self.network_parameters.input_embedders_parameters[input_name]
# get the class of the input embedder
input_embedder = self.get_input_embedder(input_name, input_type)
self.input_embedders.append(input_embedder)
# input placeholders are reused between networks. on the first network, store the placeholders
# generated by the input_embedders in self.inputs. on the rest of the networks, pass
# the existing input_placeholders into the input_embedders.
if network_idx == 0:
input_placeholder, embedding = input_embedder()
self.inputs[input_name] = input_placeholder
else:
input_placeholder, embedding = input_embedder(self.inputs[input_name])
state_embedding.append(embedding)
##########
# Merger #
##########
if len(state_embedding) == 1:
state_embedding = state_embedding[0]
else:
if self.network_parameters.embedding_merger_type == EmbeddingMergerType.Concat:
state_embedding = tf.concat(state_embedding, axis=-1, name="merger")
elif self.network_parameters.embedding_merger_type == EmbeddingMergerType.Sum:
state_embedding = tf.add_n(state_embedding, name="merger")
##############
# Middleware #
##############
self.middleware = self.get_middleware(self.network_parameters.middleware_parameters)
_, self.state_embedding = self.middleware(state_embedding)
################
# Output Heads #
################
head_count = 0
for head_idx in range(self.num_heads_per_network):
if self.network_parameters.use_separate_networks_per_head:
# if we use separate networks per head, then the head type corresponds to the network idx
head_type_idx = network_idx
head_count = network_idx
else:
                        # if we use a single network with multiple heads, then the head type is the current head idx
head_type_idx = head_idx
head_params = self.network_parameters.heads_parameters[head_type_idx]
for head_copy_idx in range(head_params.num_output_head_copies):
# create output head and add it to the output heads list
self.output_heads.append(
self.get_output_head(head_params,
head_idx*head_params.num_output_head_copies + head_copy_idx)
)
# rescale the gradients from the head
self.gradients_from_head_rescalers.append(
tf.get_variable('gradients_from_head_{}-{}_rescalers'.format(head_idx, head_copy_idx),
initializer=float(head_params.rescale_gradient_from_head_by_factor),
dtype=tf.float32))
self.gradients_from_head_rescalers_placeholders.append(
tf.placeholder('float',
name='gradients_from_head_{}-{}_rescalers'.format(head_type_idx, head_copy_idx)))
self.update_head_rescaler_value_ops.append(self.gradients_from_head_rescalers[head_count].assign(
self.gradients_from_head_rescalers_placeholders[head_count]))
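                        # Interpolate between a stop-gradient copy and the raw embedding: the forward
                        # value stays equal to state_embedding, while only a
                        # rescale_gradient_from_head_by_factor fraction of this head's gradient
                        # flows back into the shared middleware.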
head_input = (1-self.gradients_from_head_rescalers[head_count]) * tf.stop_gradient(self.state_embedding) + \
self.gradients_from_head_rescalers[head_count] * self.state_embedding
# build the head
if self.network_is_local:
output, target_placeholder, input_placeholders, importance_weight_ph = \
self.output_heads[-1](head_input)
self.targets.extend(target_placeholder)
self.importance_weights.extend(importance_weight_ph)
else:
output, input_placeholders = self.output_heads[-1](head_input)
self.outputs.extend(output)
# TODO: use head names as well
for placeholder_index, input_placeholder in enumerate(input_placeholders):
self.inputs['output_{}_{}'.format(head_type_idx, placeholder_index)] = input_placeholder
head_count += 1
# Losses
self.losses = tf.losses.get_losses(self.full_name)
self.losses += tf.losses.get_regularization_losses(self.full_name)
self.total_loss = tf.reduce_sum(self.losses)
# tf.summary.scalar('total_loss', self.total_loss)
# Learning rate
if self.network_parameters.learning_rate_decay_rate != 0:
self.adaptive_learning_rate_scheme = \
tf.train.exponential_decay(
self.network_parameters.learning_rate,
self.global_step,
decay_steps=self.network_parameters.learning_rate_decay_steps,
decay_rate=self.network_parameters.learning_rate_decay_rate,
staircase=True)
self.current_learning_rate = self.adaptive_learning_rate_scheme
else:
self.current_learning_rate = self.network_parameters.learning_rate
# Optimizer
if self.distributed_training and self.network_is_local and self.network_parameters.shared_optimizer:
# distributed training + is a local network + optimizer shared -> take the global optimizer
self.optimizer = self.global_network.optimizer
elif (self.distributed_training and self.network_is_local and not self.network_parameters.shared_optimizer) \
or self.network_parameters.shared_optimizer or not self.distributed_training:
# distributed training + is a global network + optimizer shared
# OR
# distributed training + is a local network + optimizer not shared
# OR
# non-distributed training
# -> create an optimizer
if self.network_parameters.optimizer_type == 'Adam':
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.current_learning_rate,
beta1=self.network_parameters.adam_optimizer_beta1,
beta2=self.network_parameters.adam_optimizer_beta2,
epsilon=self.network_parameters.optimizer_epsilon)
elif self.network_parameters.optimizer_type == 'RMSProp':
self.optimizer = tf.train.RMSPropOptimizer(self.current_learning_rate,
decay=self.network_parameters.rms_prop_optimizer_decay,
epsilon=self.network_parameters.optimizer_epsilon)
elif self.network_parameters.optimizer_type == 'LBFGS':
self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.total_loss, method='L-BFGS-B',
options={'maxiter': 25})
else:
raise Exception("{} is not a valid optimizer type".format(self.network_parameters.optimizer_type))
def __str__(self):
result = []
for network in range(self.num_networks):
network_structure = []
# embedder
for embedder in self.input_embedders:
network_structure.append("Input Embedder: {}".format(embedder.name))
network_structure.append(indent_string(str(embedder)))
if len(self.input_embedders) > 1:
network_structure.append("{} ({})".format(self.network_parameters.embedding_merger_type.name,
", ".join(["{} embedding".format(e.name) for e in self.input_embedders])))
# middleware
network_structure.append("Middleware:")
network_structure.append(indent_string(str(self.middleware)))
# head
if self.network_parameters.use_separate_networks_per_head:
heads = range(network, network+1)
else:
heads = range(0, len(self.output_heads))
for head_idx in heads:
head = self.output_heads[head_idx]
head_params = self.network_parameters.heads_parameters[head_idx]
if head_params.num_output_head_copies > 1:
network_structure.append("Output Head: {} (num copies = {})".format(head.name, head_params.num_output_head_copies))
else:
network_structure.append("Output Head: {}".format(head.name))
network_structure.append(indent_string(str(head)))
# finalize network
if self.num_networks > 1:
result.append("Sub-network for head: {}".format(self.output_heads[network].name))
result.append(indent_string('\n'.join(network_structure)))
else:
result.append('\n'.join(network_structure))
result = '\n'.join(result)
return result
|
the-stack_0_20349 | from problog.extern import problog_export, problog_export_nondet, problog_export_raw
@problog_export_raw("+term")
def assertz(term, target=None, **kwargs):
problog_export.database += term
target._cache.reset() # reset tabling cache
return [(term,)]
@problog_export_raw("+term")
def retract(term, target=None, **kwargs):
db = problog_export.database
nodekey = db.find(term)
node = db.get_node(nodekey)
to_erase = node.children.find(term.args)
if to_erase:
item = next(to_erase.__iter__())
node.children.erase((item,))
target._cache.reset() # reset tabling cache
return [(term,)]
else:
return []
@problog_export_raw("+term")
def retractall(term, target=None, **kwargs):
db = problog_export.database
nodekey = db.find(term)
node = db.get_node(nodekey)
to_erase = node.children.find(term.args)
node.children.erase(to_erase)
target._cache.reset() # reset tabling cache
return [(term,)]
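# Illustrative usage sketch (hypothetical ProbLog model; predicate names are made up):
# once this file is loaded as a ProbLog library, a model can call
#   assertz(counter(1)), retract(counter(X)), retractall(counter(X))
# as foreign predicates. retract/1 erases only the first matching clause, while
# retractall/1 erases every matching clause; both reset the tabling cache.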
|
the-stack_0_20351 | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d
from torch.nn.utils import weight_norm, remove_weight_norm
from vocoder.utils import init_weights, get_padding
import hparams as hp
LRELU_SLOPE = 0.1
class ResBlock1(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.h = h
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
super(ResBlock2, self).__init__()
self.h = h
self.convs = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1])))
])
self.convs.apply(init_weights)
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Generator(torch.nn.Module):
def __init__(self):
super(Generator, self).__init__()
h = hp
self.num_kernels = len(h.resblock_kernel_sizes)
self.num_upsamples = len(h.upsample_rates)
self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))
resblock = ResBlock1 if h.resblock == '1' else ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
k, u, padding=(k-u)//2)))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = h.upsample_initial_channel//(2**(i+1))
for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
self.resblocks.append(resblock(h, ch, k, d))
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x):
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
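    # Minimal inference sketch (shapes assume the upsample/resblock settings in hparams):
    #   gen = Generator().eval()
    #   mel = torch.randn(1, 80, 128)      # (batch, n_mels, frames)
    #   with torch.no_grad():
    #       audio = gen(mel)               # (batch, 1, 128 * prod(hp.upsample_rates))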
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post) |
the-stack_0_20353 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install JAX-DFT."""
import os
import setuptools
# Read in requirements
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as f:
requirements = [r.strip() for r in f]
setuptools.setup(
name='jax_dft',
version='0.0.0',
license='Apache 2.0',
author='Google LLC',
author_email='[email protected]',
install_requires=requirements,
url='https://github.com/google-research/google-research/'
'tree/master/jax_dft',
packages=setuptools.find_packages(),
python_requires='>=3.6')
|
the-stack_0_20355 | import json
import sys
from pathlib import Path
from numpy import log
import pandas as pd
DIR = Path(sys.argv[1])
FN = DIR / "perf_cuckoohashing.json"
assert FN.exists() and FN.is_file()
data = json.loads(FN.read_text())
def handle_record(record):
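    # Index the timers by name and compute build/match durations; the raw clock values
    # are divided by 1e6 (assumed to be microsecond counters, so the results are seconds).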
timer = {t["name"]: t["clock"] for t in record["timer"]}
return {
"log_count": record["log_count"],
"build": (timer["build:done"] - timer["build:start"]) / 1000000,
"match": (timer["match:done"] - timer["match:start"]) / 1000000,
}
df = pd.DataFrame([handle_record(rec) for rec in data])
df = df.groupby("log_count").describe()
df.to_excel(DIR / "perf_cuckoohashing.xlsx")
|
the-stack_0_20356 | # Copyright 2019 Palantir Technologies, Inc.
"""Linter pluging for flake8"""
import logging
import os.path
import re
from subprocess import Popen, PIPE
from pyls import hookimpl, lsp
log = logging.getLogger(__name__)
FIX_IGNORES_RE = re.compile(r'([^a-zA-Z0-9_,]*;.*(\W+||$))')
@hookimpl
def pyls_settings():
# Default flake8 to disabled
return {'plugins': {'flake8': {'enabled': False}}}
@hookimpl
def pyls_lint(workspace, document):
config = workspace._config
settings = config.plugin_settings('flake8', document_path=document.path)
log.debug("Got flake8 settings: %s", settings)
opts = {
'config': settings.get('config'),
'exclude': settings.get('exclude'),
'filename': settings.get('filename'),
'hang-closing': settings.get('hangClosing'),
'ignore': settings.get('ignore'),
'max-line-length': settings.get('maxLineLength'),
'select': settings.get('select'),
}
# flake takes only absolute path to the config. So we should check and
# convert if necessary
if opts.get('config') and not os.path.isabs(opts.get('config')):
opts['config'] = os.path.abspath(os.path.expanduser(os.path.expandvars(
opts.get('config')
)))
log.debug("using flake8 with config: %s", opts['config'])
# Call the flake8 utility then parse diagnostics from stdout
flake8_executable = settings.get('executable', 'flake8')
args = build_args(opts)
output = run_flake8(flake8_executable, args, document)
return parse_stdout(document, output)
def run_flake8(flake8_executable, args, document):
"""Run flake8 with the provided arguments, logs errors
from stderr if any.
"""
# a quick temporary fix to deal with Atom
args = [(i if not i.startswith('--ignore=') else FIX_IGNORES_RE.sub('', i))
for i in args if i is not None]
# if executable looks like a path resolve it
if not os.path.isfile(flake8_executable) and os.sep in flake8_executable:
flake8_executable = os.path.abspath(
os.path.expanduser(os.path.expandvars(flake8_executable))
)
log.debug("Calling %s with args: '%s'", flake8_executable, args)
try:
cmd = [flake8_executable]
cmd.extend(args)
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except IOError:
log.debug("Can't execute %s. Trying with 'python -m flake8'", flake8_executable)
cmd = ['python', '-m', 'flake8']
cmd.extend(args)
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate(document.source.encode())
if stderr:
log.error("Error while running flake8 '%s'", stderr.decode())
return stdout.decode()
def build_args(options):
"""Build arguments for calling flake8.
Args:
options: dictionary of argument names and their values.
"""
args = ['-'] # use stdin
for arg_name, arg_val in options.items():
if arg_val is None:
continue
arg = None
if isinstance(arg_val, list):
arg = '--{}={}'.format(arg_name, ','.join(arg_val))
elif isinstance(arg_val, bool):
if arg_val:
arg = '--{}'.format(arg_name)
else:
arg = '--{}={}'.format(arg_name, arg_val)
args.append(arg)
return args
def parse_stdout(document, stdout):
"""
    Build diagnostics from flake8's output; it extracts every result and formats
    it into a dict that looks like this:
{
'source': 'flake8',
'code': code, # 'E501'
'range': {
'start': {
'line': start_line,
'character': start_column,
},
'end': {
'line': end_line,
'character': end_column,
},
},
'message': msg,
'severity': lsp.DiagnosticSeverity.*,
}
Args:
document: The document to be linted.
stdout: output from flake8
Returns:
A list of dictionaries.
"""
diagnostics = []
lines = stdout.splitlines()
for raw_line in lines:
parsed_line = re.match(r'(.*):(\d*):(\d*): (\w*) (.*)', raw_line)
if not parsed_line:
log.debug("Flake8 output parser can't parse line '%s'", raw_line)
continue
parsed_line = parsed_line.groups()
if len(parsed_line) != 5:
log.debug("Flake8 output parser can't parse line '%s'", raw_line)
continue
_, line, character, code, msg = parsed_line
line = int(line) - 1
character = int(character) - 1
diagnostics.append(
{
'source': 'flake8',
'code': code,
'range': {
'start': {
'line': line,
'character': character
},
'end': {
'line': line,
# no way to determine the column
'character': len(document.lines[line])
}
},
'message': msg,
'severity': lsp.DiagnosticSeverity.Warning,
}
)
return diagnostics
|
the-stack_0_20359 | """
MIT License
Copyright (c) 2021 TheHamkerCat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import importlib
import re
import uvloop
from pyrogram import filters, idle
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from wbb import (BOT_NAME, BOT_USERNAME, LOG_GROUP_ID, USERBOT_NAME,
aiohttpsession, app)
from wbb.modules import ALL_MODULES
from wbb.modules.sudoers import bot_sys_stats
from wbb.utils import paginate_modules
from wbb.utils.constants import MARKDOWN
from wbb.utils.dbfunctions import clean_restart_stage
loop = asyncio.get_event_loop()
HELPABLE = {}
async def start_bot():
global HELPABLE
for module in ALL_MODULES:
imported_module = importlib.import_module("wbb.modules." + module)
if (
hasattr(imported_module, "__MODULE__")
and imported_module.__MODULE__
):
imported_module.__MODULE__ = imported_module.__MODULE__
if (
hasattr(imported_module, "__HELP__")
and imported_module.__HELP__
):
HELPABLE[
imported_module.__MODULE__.replace(" ", "_").lower()
] = imported_module
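            # e.g. a module exposing __MODULE__ = "Admin Tools" is registered under the key "admin_tools"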
bot_modules = ""
j = 1
for i in ALL_MODULES:
if j == 4:
bot_modules += "|{:<15}|\n".format(i)
j = 0
else:
bot_modules += "|{:<15}".format(i)
j += 1
print("+===============================================================+")
print("| WBB |")
print("+===============+===============+===============+===============+")
print(bot_modules)
print("+===============+===============+===============+===============+")
print(f"[INFO]: BOT STARTED AS {BOT_NAME}!")
print(f"[INFO]: USERBOT STARTED AS {USERBOT_NAME}!")
restart_data = await clean_restart_stage()
try:
print("[INFO]: SENDING ONLINE STATUS")
if restart_data:
await app.edit_message_text(
restart_data["chat_id"],
restart_data["message_id"],
"**Restarted Successfully**",
)
else:
await app.send_message(LOG_GROUP_ID, "Bot started!")
except Exception:
pass
await idle()
await aiohttpsession.close()
print("[INFO]: CLOSING AIOHTTP SESSION AND STOPPING BOT")
await app.stop()
print("[INFO]: Bye!")
for task in asyncio.all_tasks():
task.cancel()
print("[INFO]: Turned off!")
home_keyboard_pm = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text="Commands ❓", callback_data="bot_commands"
),
InlineKeyboardButton(
text="Repo 🛠",
url="https://t.me/Jaihindupuramking",
),
],
[
InlineKeyboardButton(
text="System Stats 🖥",
callback_data="stats_callback",
),
InlineKeyboardButton(
text="Support 👨", url="https://t.me/PHOENIX_CHAT_TAMIL"
),
],
[
InlineKeyboardButton(
text="Add Me To Your Group 🎉",
url=f"http://t.me/{BOT_USERNAME}?startgroup=new",
)
],
]
)
home_text_pm = (
f"Hey there! My name is {BOT_NAME}. I can manage your "
+ "group with lots of useful features, feel free to "
+ "add me to your group."
)
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text="Help ❓",
url=f"t.me/{BOT_USERNAME}?start=help",
),
InlineKeyboardButton(
text="Repo 🛠",
url="https://t.me/Jaihindupuramking",
),
],
[
InlineKeyboardButton(
text="System Stats 💻",
callback_data="stats_callback",
),
InlineKeyboardButton(text="Support 👨", url="https://t.me/PHOENIX_CHAT_TAMIL"),
],
]
)
@app.on_message(filters.command("start"))
async def start(_, message):
if message.chat.type != "private":
return await message.reply(
"Pm Me For More Details.", reply_markup=keyboard
)
if len(message.text.split()) > 1:
name = (message.text.split(None, 1)[1]).lower()
if name == "mkdwn_help":
await message.reply(
MARKDOWN, parse_mode="html", disable_web_page_preview=True
)
elif "_" in name:
module = name.split("_", 1)[1]
text = (
f"Here is the help for **{HELPABLE[module].__MODULE__}**:\n"
+ HELPABLE[module].__HELP__
)
await message.reply(text, disable_web_page_preview=True)
elif name == "help":
text, keyb = await help_parser(message.from_user.first_name)
await message.reply(
text,
reply_markup=keyb,
)
else:
await message.reply(
home_text_pm,
reply_markup=home_keyboard_pm,
)
return
@app.on_message(filters.command("help"))
async def help_command(_, message):
if message.chat.type != "private":
if len(message.command) >= 2:
name = (message.text.split(None, 1)[1]).replace(" ", "_").lower()
if str(name) in HELPABLE:
key = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text="Click here",
url=f"t.me/{BOT_USERNAME}?start=help_{name}",
)
],
]
)
await message.reply(
f"Click on the below button to get help about {name}",
reply_markup=key,
)
else:
await message.reply(
"PM Me For More Details.", reply_markup=keyboard
)
else:
await message.reply(
"Pm Me For More Details.", reply_markup=keyboard
)
else:
if len(message.command) >= 2:
name = (message.text.split(None, 1)[1]).replace(" ", "_").lower()
if str(name) in HELPABLE:
text = (
f"Here is the help for **{HELPABLE[name].__MODULE__}**:\n"
+ HELPABLE[name].__HELP__
)
await message.reply(text, disable_web_page_preview=True)
else:
text, help_keyboard = await help_parser(
message.from_user.first_name
)
await message.reply(
text,
reply_markup=help_keyboard,
disable_web_page_preview=True,
)
else:
text, help_keyboard = await help_parser(
message.from_user.first_name
)
await message.reply(
text, reply_markup=help_keyboard, disable_web_page_preview=True
)
return
async def help_parser(name, keyboard=None):
if not keyboard:
keyboard = InlineKeyboardMarkup(paginate_modules(0, HELPABLE, "help"))
return (
"""Hello {first_name}, My name is {bot_name}.
I'm a group management bot with some useful features.
You can choose an option below, by clicking a button.
Also you can ask anything in Support Group.
""".format(
first_name=name,
bot_name=BOT_NAME,
),
keyboard,
)
@app.on_callback_query(filters.regex("bot_commands"))
async def commands_callbacc(_, CallbackQuery):
text, keyboard = await help_parser(CallbackQuery.from_user.mention)
await app.send_message(
CallbackQuery.message.chat.id,
text=text,
reply_markup=keyboard,
)
await CallbackQuery.message.delete()
@app.on_callback_query(filters.regex("stats_callback"))
async def stats_callbacc(_, CallbackQuery):
text = await bot_sys_stats()
await app.answer_callback_query(CallbackQuery.id, text, show_alert=True)
@app.on_callback_query(filters.regex(r"help_(.*?)"))
async def help_button(client, query):
home_match = re.match(r"help_home\((.+?)\)", query.data)
mod_match = re.match(r"help_module\((.+?)\)", query.data)
prev_match = re.match(r"help_prev\((.+?)\)", query.data)
next_match = re.match(r"help_next\((.+?)\)", query.data)
back_match = re.match(r"help_back", query.data)
create_match = re.match(r"help_create", query.data)
top_text = f"""
Hello {query.from_user.first_name}, My name is {BOT_NAME}.
I'm a group management bot with some useful features.
You can choose an option below, by clicking a button.
Also you can ask anything in Support Group.
General commands are:
- /start: Start the bot
- /help: Give this message
"""
if mod_match:
module = (mod_match.group(1)).replace(" ", "_")
text = (
"{} **{}**:\n".format(
"Here is the help for", HELPABLE[module].__MODULE__
)
+ HELPABLE[module].__HELP__
)
await query.message.edit(
text=text,
reply_markup=InlineKeyboardMarkup(
[[InlineKeyboardButton("back", callback_data="help_back")]]
),
disable_web_page_preview=True,
)
elif home_match:
await app.send_message(
query.from_user.id,
text=home_text_pm,
reply_markup=home_keyboard_pm,
)
await query.message.delete()
elif prev_match:
curr_page = int(prev_match.group(1))
await query.message.edit(
text=top_text,
reply_markup=InlineKeyboardMarkup(
paginate_modules(curr_page - 1, HELPABLE, "help")
),
disable_web_page_preview=True,
)
elif next_match:
next_page = int(next_match.group(1))
await query.message.edit(
text=top_text,
reply_markup=InlineKeyboardMarkup(
paginate_modules(next_page + 1, HELPABLE, "help")
),
disable_web_page_preview=True,
)
elif back_match:
await query.message.edit(
text=top_text,
reply_markup=InlineKeyboardMarkup(
paginate_modules(0, HELPABLE, "help")
),
disable_web_page_preview=True,
)
elif create_match:
text, keyboard = await help_parser(query)
await query.message.edit(
text=text,
reply_markup=keyboard,
disable_web_page_preview=True,
)
return await client.answer_callback_query(query.id)
if __name__ == "__main__":
uvloop.install()
try:
try:
loop.run_until_complete(start_bot())
except asyncio.exceptions.CancelledError:
pass
loop.run_until_complete(asyncio.sleep(3.0)) # task cancel wait
finally:
loop.close()
|
the-stack_0_20367 | import torch
from lrp import Linear, Conv2d
from maxpool import MaxPool2d
from functional.utils import normalize
def grad_decorator_fn(module):
def fn(x):
return normalize(x)
return fn
avoid_normalization_on = ['relu', 'maxp']
def do_normalization(rule, module):
if "pattern" not in rule.lower(): return False
return not str(module)[:4].lower() in avoid_normalization_on
def is_kernel_layer(module):
return isinstance(module, Conv2d) or isinstance(module, Linear)
def is_rule_specific_layer(module):
return isinstance(module, MaxPool2d)
class Sequential(torch.nn.Sequential):
def forward(self, input, explain=False, rule="epsilon", pattern=None):
if not explain: return super(Sequential, self).forward(input)
first = True
if pattern is not None: pattern = list(pattern)
for module in self:
if do_normalization(rule, module):
input.register_hook(grad_decorator_fn(module))
if is_kernel_layer(module):
P = None
if pattern is not None:
P = pattern.pop(0)
input = module.forward(input, explain=True, rule=rule, pattern=P)
elif is_rule_specific_layer(module):
input = module.forward(input, explain=True, rule=rule)
else:
input = module(input)
first = False
if do_normalization(rule, module):
input.register_hook(grad_decorator_fn(module))
return input
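    # Minimal usage sketch (assumes a model built from the lrp-wrapped Linear/Conv2d
    # layers above; relevance is read from the input gradient):
    #   x.requires_grad_(True)
    #   y = model(x, explain=True, rule="epsilon")
    #   y[torch.arange(len(y)), y.argmax(dim=1)].sum().backward()
    #   relevance = x.grad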
|
the-stack_0_20369 | import numpy as np
from scipy.optimize import fmin_cobyla
class MultiTissueConvexOptimizer:
"""
Secondary optimizer for including S0 tissue response values into the volume
fraction estimation.
Following the suggestion by [1]_, when including S0 responses, the
volume fractions are no longer unity constrained. This means that the
optimization of linear volume fractions and non-linear parameters is
independent, and thus this secondary optimization is just a simple convex
optimization on the volume fractions only.
Parameters
----------
model: dmipy multi-compartment model instance,
dmipy initialized mc model.
S0_tissue_responses: list,
        contains the positive S0 tissue responses that are associated with the
tissue that each compartment model in the mc-model represents.
References
----------
.. [1] Dell'Acqua, Flavio, and J-Donald Tournier. "Modelling white matter
with spherical deconvolution: How and why?." NMR in Biomedicine 32.4
(2019): e3945.
"""
def __init__(self, acquisition_scheme, model, S0_tissue_responses):
self.acquisition_scheme = acquisition_scheme
self.model = model
self.S0_tissue_responses = S0_tissue_responses
def cobyla_cost_function(self, fractions, phi, data):
"Objective function of linear parameter estimation using COBYLA."
E_hat = np.dot(phi, fractions)
diff = data - E_hat
objective = np.dot(diff, diff)
return objective * 1e5
def __call__(self, data, x0):
params = x0 * self.model.scales_for_optimization
params_dict = self.model.parameter_vector_to_parameters(params)
phi = self.model(self.acquisition_scheme,
quantity="stochastic cost function", **params_dict)
phi *= self.S0_tissue_responses
if self.model.N_models == 1:
vf_x0 = [1.]
else:
vf_x0 = x0[-self.model.N_models:]
vf = fmin_cobyla(self.cobyla_cost_function, x0=vf_x0,
cons=[cobyla_positivity_constraint],
args=(phi, data),
maxfun=2000)
return vf
def cobyla_positivity_constraint(volume_fractions, *args):
"COBYLA positivity constraint on volume fractions"
return volume_fractions - 0.001
|
the-stack_0_20370 | import random
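# Toy PRNG: setup() expands a 32-bit seed into a 64-bit internal state (two seed bits
# consumed per four state bits); next(bits) emits the low state bit once per requested
# output bit, mixing the state in between with a shift/XOR, a bitwise complement and a
# per-nibble bit-shuffling XOR.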
def setup(seed):
global state
state = 0
for i in xrange(16):
cur = seed & 3
seed >>= 2
state = (state << 4) | ((state & 3) ^ cur)
state |= cur << 2
def next(bits):
global state
ret = 0
for i in xrange(bits):
ret <<= 1
ret |= state & 1
state = (state << 1) ^ (state >> 61)
state &= 0xFFFFFFFFFFFFFFFF
state ^= 0xFFFFFFFFFFFFFFFF
for j in xrange(0, 64, 4):
cur = (state >> j) & 0xF
cur = (cur >> 3) | ((cur >> 2) & 2) | ((cur << 3) & 8) | ((cur << 2) & 4)
state ^= cur << j
return ret, state
setup((random.randrange(0x10000) << 16) | random.randrange(0x10000)) |
the-stack_0_20371 | # Angel Villa
from tkinter import *
from tkinter.ttk import *
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
# allows buttons to take multiple inputs
from functools import partial
import numpy as np
import dsp_functions as df
class Window(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.master = master
self.nb = Notebook(self.master)
self.nb.pack(side=TOP, fill=BOTH, expand=1)
# self.vals_ij corresponds to the values handled by the jth plot on the ith tab
self.n = np.linspace(-2*np.pi,2*np.pi,256)
self.vals_11 = self.n*0
self.vals_12 = self.n*0
self.vals_13 = self.n*0
self.vals_21 = self.n*0
self.vals_22 = self.n*0
# Three frames in convolution tab, two for input and one for output
self.f_1 = Frame(self.nb)
self.f_11 = Frame(self.f_1)
self.f_12 = Frame(self.f_1)
self.f_13 = Frame(self.f_1)
self.f_11.pack(fill=BOTH)
self.f_12.pack(fill=BOTH)
self.f_13.pack(fill=BOTH)
self.nb.add(self.f_1, text="Convolution")
# Two frames in PSD tab, one for input and one for output
self.f_2 = Frame(self.nb)
self.f_21 = Frame(self.f_2)
self.f_22 = Frame(self.f_2)
self.f_21.pack(fill=BOTH)
self.f_22.pack(fill=BOTH)
self.nb.add(self.f_2, text="Power Spectral Density")
self.nb.select(self.f_1)
self.nb.enable_traversal()
# display the two tabs
self.convolution_tab()
self.psd_tab()
def convolution_tab(self):
# First convolution figure
self.fig_11 = Figure(figsize=(3,2), dpi=100)
self.fig_11.add_subplot(111).stem(self.n, self.vals_11, markerfmt="None", use_line_collection=True)
# Second convolution figure
self.fig_12 = Figure(figsize=(3,2), dpi=100)
self.fig_12.add_subplot(111).stem(self.n, self.vals_12, markerfmt="None", use_line_collection=True)
# Output convolution figure
self.fig_13 = Figure(figsize=(3,2), dpi=100)
self.fig_13.add_subplot(111).stem(self.n, self.vals_13, markerfmt="None", use_line_collection=True)
# Add figure 1 and toolbar to canvas 1
self.canvas_11 = FigureCanvasTkAgg(self.fig_11, master=self.f_11)
self.canvas_11.draw()
self.canvas_11.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1, padx=10, pady=10)
self.toolbar_11 = NavigationToolbar2Tk(self.canvas_11, self.f_11)
self.toolbar_11.update()
self.canvas_11.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1, padx=10, pady=5)
# Figure 1 buttons
self.button_11 = Button(self.f_11, text="Step", command=partial(self.update_11, df.step, self.fig_11, self.canvas_11, self.n))
self.button_11.pack(side=RIGHT, padx=(0,10))
self.button_12 = Button(self.f_11, text="Cosine", command=partial(self.update_11, df.cosine, self.fig_11, self.canvas_11, self.n))
self.button_12.pack(side=RIGHT)
self.button_13 = Button(self.f_11, text="Sine", command=partial(self.update_11, df.sine, self.fig_11, self.canvas_11, self.n))
self.button_13.pack(side=RIGHT)
self.button_14 = Button(self.f_11, text="Rectangle", command=partial(self.update_11, df.rectangle, self.fig_11, self.canvas_11, self.n))
self.button_14.pack(side=RIGHT)
self.button_15 = Button(self.f_11, text="Triangle", command=partial(self.update_11, df.triangle, self.fig_11, self.canvas_11, self.n))
self.button_15.pack(side=RIGHT)
self.button_16 = Button(self.f_11, text="Ramp", command=partial(self.update_11, df.ramp, self.fig_11, self.canvas_11, self.n))
self.button_16.pack(side=RIGHT)
self.button_17 = Button(self.f_11, text="Sawtooth (lf)", command=partial(self.update_11, df.sawtooth_lf, self.fig_11, self.canvas_11, self.n))
self.button_17.pack(side=RIGHT)
self.button_18 = Button(self.f_11, text="Sawtooth (hf)", command=partial(self.update_11, df.sawtooth_hf, self.fig_11, self.canvas_11, self.n))
self.button_18.pack(side=RIGHT)
self.button_19 = Button(self.f_11, text="Square (lf)", command=partial(self.update_11, df.square_lf, self.fig_11, self.canvas_11, self.n))
self.button_19.pack(side=RIGHT)
self.button_110 = Button(self.f_11, text="Square wave (hf)", command=partial(self.update_11, df.square_hf, self.fig_11, self.canvas_11, self.n))
self.button_110.pack(side=RIGHT)
self.button_111 = Button(self.f_11, text="Gaussian noise", command=partial(self.update_11, df.gaussian_noise, self.fig_11, self.canvas_11, self.n))
self.button_111.pack(side=RIGHT)
# Add figure 2 and toolbar to canvas 2
self.canvas_12 = FigureCanvasTkAgg(self.fig_12, master=self.f_12)
self.canvas_12.draw()
self.canvas_12.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1, padx=10, pady=10)
self.toolbar_12 = NavigationToolbar2Tk(self.canvas_12, self.f_12)
self.toolbar_12.update()
self.canvas_12.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1, padx=10, pady=5)
# Figure 2 buttons
self.button_112 = Button(self.f_12, text="Step", command=partial(self.update_12, df.step, self.fig_12, self.canvas_12, self.n))
self.button_112.pack(side=RIGHT, padx=(0,10))
self.button_113 = Button(self.f_12, text="Cosine", command=partial(self.update_12, df.cosine, self.fig_12, self.canvas_12, self.n))
self.button_113.pack(side=RIGHT)
self.button_114 = Button(self.f_12, text="Sine", command=partial(self.update_12, df.sine, self.fig_12, self.canvas_12, self.n))
self.button_114.pack(side=RIGHT)
self.button_115 = Button(self.f_12, text="Rectangle", command=partial(self.update_12, df.rectangle, self.fig_12, self.canvas_12, self.n))
self.button_115.pack(side=RIGHT)
self.button_116 = Button(self.f_12, text="Triangle", command=partial(self.update_12, df.triangle, self.fig_12, self.canvas_12, self.n))
self.button_116.pack(side=RIGHT)
self.button_117 = Button(self.f_12, text="Ramp", command=partial(self.update_12, df.ramp, self.fig_12, self.canvas_12, self.n))
self.button_117.pack(side=RIGHT)
self.button_118 = Button(self.f_12, text="Sawtooth (lf)", command=partial(self.update_12, df.sawtooth_lf, self.fig_12, self.canvas_12, self.n))
self.button_118.pack(side=RIGHT)
self.button_119 = Button(self.f_12, text="Sawtooth (hf)", command=partial(self.update_12, df.sawtooth_hf, self.fig_12, self.canvas_12, self.n))
self.button_119.pack(side=RIGHT)
self.button_120 = Button(self.f_12, text="Square (lf)", command=partial(self.update_12, df.square_lf, self.fig_12, self.canvas_12, self.n))
self.button_120.pack(side=RIGHT)
self.button_121 = Button(self.f_12, text="Square wave (hf)", command=partial(self.update_12, df.square_hf, self.fig_12, self.canvas_12, self.n))
self.button_121.pack(side=RIGHT)
self.button_122 = Button(self.f_12, text="Gaussian noise", command=partial(self.update_12, df.gaussian_noise, self.fig_12, self.canvas_12, self.n))
self.button_122.pack(side=RIGHT)
# Add figure 3 and toolbar to canvas 3
self.canvas_13 = FigureCanvasTkAgg(self.fig_13, master=self.f_13)
self.canvas_13.draw()
self.canvas_13.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1, padx=10, pady=10)
self.toolbar_13 = NavigationToolbar2Tk(self.canvas_13, self.f_13)
self.toolbar_13.update()
self.canvas_13.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1, padx=10, pady=5)
# Figure 3 button
self.button_121 = Button(self.f_13, text="Convolve", command=partial(self.update_13, df.conv, self.fig_13, self.canvas_13))
self.button_121.pack()
self.canvas_11.mpl_connect("key_press_event", self.on_key_press)
self.canvas_12.mpl_connect("key_press_event", self.on_key_press)
self.canvas_13.mpl_connect("key_press_event", self.on_key_press)
def psd_tab(self):
# Second tab, shows input signal and output PSD
# First PSD figure
self.fig_21 = Figure(figsize=(3,3), dpi=100)
self.fig_21.add_subplot(111).stem(self.n, self.vals_21, markerfmt="None", use_line_collection=True)
# Second PSD Figure
self.fig_22 = Figure(figsize=(3,3), dpi=100)
self.fig_22.add_subplot(111).stem(self.n, self.vals_22, markerfmt="None", use_line_collection=True)
# Add figure 1 and toolbar to canvas 1
self.canvas_21 = FigureCanvasTkAgg(self.fig_21, master=self.f_21)
self.canvas_21.draw()
self.canvas_21.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1, padx=10, pady=5)
self.toolbar_21 = NavigationToolbar2Tk(self.canvas_21, self.f_21)
self.toolbar_21.update()
self.canvas_21.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1, padx=10, pady=5)
# Figure 1 buttons
self.button_21 = Button(self.f_21, text="Step", command=partial(self.update_21, df.step, self.fig_21, self.canvas_21, self.n))
self.button_21.pack(side=RIGHT, padx=(0,10))
self.button_22 = Button(self.f_21, text="Cosine", command=partial(self.update_21, df.cosine, self.fig_21, self.canvas_21, self.n))
self.button_22.pack(side=RIGHT)
self.button_23 = Button(self.f_21, text="Sine", command=partial(self.update_21, df.sine, self.fig_21, self.canvas_21, self.n))
self.button_23.pack(side=RIGHT)
self.button_24 = Button(self.f_21, text="Rectangle", command=partial(self.update_21, df.rectangle, self.fig_21, self.canvas_21, self.n))
self.button_24.pack(side=RIGHT)
self.button_25 = Button(self.f_21, text="Triangle", command=partial(self.update_21, df.triangle, self.fig_21, self.canvas_21, self.n))
self.button_25.pack(side=RIGHT)
self.button_26 = Button(self.f_21, text="Ramp", command=partial(self.update_21, df.ramp, self.fig_21, self.canvas_21, self.n))
self.button_26.pack(side=RIGHT)
self.button_27 = Button(self.f_21, text="Sawtooth (lf)", command=partial(self.update_21, df.sawtooth_lf, self.fig_21, self.canvas_21, self.n))
self.button_27.pack(side=RIGHT)
self.button_28 = Button(self.f_21, text="Sawtooth (hf)", command=partial(self.update_21, df.sawtooth_hf, self.fig_21, self.canvas_21, self.n))
self.button_28.pack(side=RIGHT)
self.button_29 = Button(self.f_21, text="Square (lf)", command=partial(self.update_21, df.square_lf, self.fig_21, self.canvas_21, self.n))
self.button_29.pack(side=RIGHT)
self.button_210 = Button(self.f_21, text="Square (hf)", command=partial(self.update_21, df.square_hf, self.fig_21, self.canvas_21, self.n))
self.button_210.pack(side=RIGHT)
self.button_211 = Button(self.f_21, text="Gaussian noise", command=partial(self.update_21, df.gaussian_noise, self.fig_21, self.canvas_21, self.n))
self.button_211.pack(side=RIGHT)
# Add figure 2 and toolbar to canvas 2
self.canvas_22 = FigureCanvasTkAgg(self.fig_22, master=self.f_22)
self.canvas_22.draw()
self.canvas_22.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1, padx=10, pady=10)
self.toolbar_22 = NavigationToolbar2Tk(self.canvas_22, self.f_22)
self.toolbar_22.update()
self.canvas_22.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1, padx=10, pady=5)
# Figure 2 button
self.button_212 = Button(self.f_22, text="Power Spectral Density", command=partial(self.update_22, df.psd, self.fig_22, self.canvas_22))
self.button_212.pack()
self.canvas_21.mpl_connect("key_press_event", self.on_key_press)
self.canvas_22.mpl_connect("key_press_event", self.on_key_press)
# handle key presses
def on_key_press(self, event):
print("you pressed {}".format(event.key))
        key_press_handler(event, event.canvas)  # self.canvas / self.toolbar are never defined; use the event's own canvas
# update plots, functions called on the respective button click
def update_11(self, func, fig, canvas, n):
self.vals_11 = func(fig, canvas, n)
def update_12(self, func, fig, canvas, n):
self.vals_12 = func(fig, canvas, n)
def update_13(self, func, fig, canvas):
self.vals_13 = func(fig, canvas, self.vals_11, self.vals_12)
def update_21(self, func, fig, canvas, n):
self.vals_21 = func(fig, canvas, n)
def update_22(self, func, fig, canvas):
self.vals_22 = func(fig, canvas, self.vals_21)
def main():
root = Tk()
# state('zoomed') forces app to open maximized
root.state('zoomed')
app = Window(root)
root.wm_title("Signals")
root.geometry("1366x768")
root.mainloop()
if __name__ == '__main__':
main()
|
the-stack_0_20372 | import statsmodels.api as sm
def jensen_alpha_beta(risk_returns ,benchmark_returns,Rebalancement_frequency):
"""
Compute the Beta and alpha of the investment under the CAPM
Parameters
----------
risk_returns : np.ndarray
benchmark_returns : np.ndarray
Rebalancement_frequency : np.float64
Returns
----------
    beta : np.float64
    alpha : np.float64, scaled by Rebalancement_frequency
"""
benchmark_returns = sm.add_constant(benchmark_returns)
model = sm.OLS(risk_returns,benchmark_returns).fit()
alpha,beta = model.params[0] * Rebalancement_frequency , model.params[1]
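    # beta is the OLS slope against the benchmark; alpha is the regression intercept
    # scaled by Rebalancement_frequency (e.g. 252 for daily returns gives an annualized alpha).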
return beta,alpha |
the-stack_0_20375 | import datetime
import functools
import json
import logging
import os
import random
import socket
import six
import sys
import time
import urllib3
import yaml
from urllib3 import Timeout
from urllib3.exceptions import HTTPError
from six.moves.http_client import HTTPException
from threading import Condition, Lock, Thread
from . import AbstractDCS, Cluster, ClusterConfig, Failover, Leader, Member, SyncState, TimelineHistory
from ..exceptions import DCSError
from ..utils import deep_compare, iter_response_objects, keepalive_socket_options,\
Retry, RetryFailedError, tzutc, uri, USER_AGENT
logger = logging.getLogger(__name__)
KUBE_CONFIG_DEFAULT_LOCATION = os.environ.get('KUBECONFIG', '~/.kube/config')
SERVICE_HOST_ENV_NAME = 'KUBERNETES_SERVICE_HOST'
SERVICE_PORT_ENV_NAME = 'KUBERNETES_SERVICE_PORT'
SERVICE_TOKEN_FILENAME = '/var/run/secrets/kubernetes.io/serviceaccount/token'
SERVICE_CERT_FILENAME = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'
class KubernetesError(DCSError):
pass
# this function does the same mapping of snake_case => camelCase for > 97% of cases as autogenerated swagger code
def to_camel_case(value):
reserved = {'api', 'apiv3', 'cidr', 'cpu', 'csi', 'id', 'io', 'ip', 'ipc', 'pid', 'tls', 'uri', 'url', 'uuid'}
words = value.split('_')
return words[0] + ''.join(w.upper() if w in reserved else w.title() for w in words[1:])
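    # e.g. to_camel_case('resource_version') -> 'resourceVersion'
    #      to_camel_case('pod_ip')           -> 'podIP'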
class K8sConfig(object):
class ConfigException(Exception):
pass
def __init__(self):
self.pool_config = {'maxsize': 10, 'num_pools': 10} # configuration for urllib3.PoolManager
self._make_headers()
def _make_headers(self, token=None, **kwargs):
self._headers = urllib3.make_headers(user_agent=USER_AGENT, **kwargs)
if token:
self._headers['authorization'] = 'Bearer ' + token
def load_incluster_config(self):
if SERVICE_HOST_ENV_NAME not in os.environ or SERVICE_PORT_ENV_NAME not in os.environ:
raise self.ConfigException('Service host/port is not set.')
if not os.environ[SERVICE_HOST_ENV_NAME] or not os.environ[SERVICE_PORT_ENV_NAME]:
raise self.ConfigException('Service host/port is set but empty.')
if not os.path.isfile(SERVICE_CERT_FILENAME):
raise self.ConfigException('Service certificate file does not exists.')
with open(SERVICE_CERT_FILENAME) as f:
if not f.read():
raise self.ConfigException('Cert file exists but empty.')
if not os.path.isfile(SERVICE_TOKEN_FILENAME):
raise self.ConfigException('Service token file does not exists.')
with open(SERVICE_TOKEN_FILENAME) as f:
token = f.read()
if not token:
raise self.ConfigException('Token file exists but empty.')
self._make_headers(token=token)
self.pool_config['ca_certs'] = SERVICE_CERT_FILENAME
self._server = uri('https', (os.environ[SERVICE_HOST_ENV_NAME], os.environ[SERVICE_PORT_ENV_NAME]))
@staticmethod
def _get_by_name(config, section, name):
for c in config[section + 's']:
if c['name'] == name:
return c[section]
def load_kube_config(self, context=None):
with open(os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION)) as f:
config = yaml.safe_load(f)
context = self._get_by_name(config, 'context', context or config['current-context'])
cluster = self._get_by_name(config, 'cluster', context['cluster'])
user = self._get_by_name(config, 'user', context['user'])
self._server = cluster['server'].rstrip('/')
if self._server.startswith('https'):
self.pool_config.update({v: user[k] for k, v in {'client-certificate': 'cert_file',
'client-key': 'key_file'}.items() if k in user})
if 'certificate-authority' in cluster:
self.pool_config['ca_certs'] = cluster['certificate-authority']
self.pool_config['cert_reqs'] = 'CERT_NONE' if cluster.get('insecure-skip-tls-verify') else 'CERT_REQUIRED'
if user.get('token'):
self._make_headers(token=user['token'])
elif 'username' in user and 'password' in user:
self._headers = self._make_headers(basic_auth=':'.join((user['username'], user['password'])))
@property
def server(self):
return self._server
@property
def headers(self):
return self._headers.copy()
class K8sObject(object):
def __init__(self, kwargs):
self._dict = {k: self._wrap(k, v) for k, v in kwargs.items()}
def get(self, name, default=None):
return self._dict.get(name, default)
def __getattr__(self, name):
return self.get(to_camel_case(name))
@classmethod
def _wrap(cls, parent, value):
if isinstance(value, dict):
# we know that `annotations` and `labels` are dicts and therefore don't want to convert them into K8sObject
return value if parent in {'annotations', 'labels'} and \
all(isinstance(v, six.string_types) for v in value.values()) else cls(value)
elif isinstance(value, list):
return [cls._wrap(None, v) for v in value]
else:
return value
def to_dict(self):
return self._dict
def __repr__(self):
return json.dumps(self, indent=4, default=lambda o: o.to_dict())
class K8sException(Exception):
pass
class K8sConnectionFailed(K8sException):
pass
class K8sClient(object):
class rest(object):
class ApiException(Exception):
def __init__(self, status=None, reason=None, http_resp=None):
self.status = http_resp.status if http_resp else status
self.reason = http_resp.reason if http_resp else reason
self.body = http_resp.data if http_resp else None
self.headers = http_resp.getheaders() if http_resp else None
def __str__(self):
error_message = "({0})\nReason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
class ApiClient(object):
_API_URL_PREFIX = '/api/v1/namespaces/'
def __init__(self, bypass_api_service=False):
self._bypass_api_service = bypass_api_service
self.pool_manager = urllib3.PoolManager(**k8s_config.pool_config)
self._base_uri = k8s_config.server
self._api_servers_cache = [k8s_config.server]
self._api_servers_cache_updated = 0
self.set_api_servers_cache_ttl(10)
self.set_read_timeout(10)
try:
self._load_api_servers_cache()
except K8sException:
pass
def set_read_timeout(self, timeout):
self._read_timeout = timeout
def set_api_servers_cache_ttl(self, ttl):
self._api_servers_cache_ttl = ttl - 0.5
def set_base_uri(self, value):
logger.info('Selected new K8s API server endpoint %s', value)
# We will connect by IP of the master node which is not listed as alternative name
self.pool_manager.connection_pool_kw['assert_hostname'] = False
self._base_uri = value
@staticmethod
def _handle_server_response(response, _preload_content):
if response.status not in range(200, 206):
raise k8s_client.rest.ApiException(http_resp=response)
return K8sObject(json.loads(response.data.decode('utf-8'))) if _preload_content else response
@staticmethod
def _make_headers(headers):
ret = k8s_config.headers
ret.update(headers or {})
return ret
@property
def api_servers_cache(self):
base_uri, cache = self._base_uri, self._api_servers_cache
return ([base_uri] if base_uri in cache else []) + [machine for machine in cache if machine != base_uri]
def _get_api_servers(self, api_servers_cache):
_, per_node_timeout, per_node_retries = self._calculate_timeouts(len(api_servers_cache))
kwargs = {'headers': self._make_headers({}), 'preload_content': True, 'retries': per_node_retries,
'timeout': urllib3.Timeout(connect=max(1, per_node_timeout/2.0), total=per_node_timeout)}
path = self._API_URL_PREFIX + 'default/endpoints/kubernetes'
for base_uri in api_servers_cache:
try:
response = self.pool_manager.request('GET', base_uri + path, **kwargs)
endpoint = self._handle_server_response(response, True)
for subset in endpoint.subsets:
for port in subset.ports:
if port.name == 'https' and port.protocol == 'TCP':
addresses = [uri('https', (a.ip, port.port)) for a in subset.addresses]
if addresses:
random.shuffle(addresses)
return addresses
except Exception as e:
if isinstance(e, k8s_client.rest.ApiException) and e.status == 403:
raise
self.pool_manager.clear()
logger.error('Failed to get "kubernetes" endpoint from %s: %r', base_uri, e)
raise K8sConnectionFailed('No more K8s API server nodes in the cluster')
def _refresh_api_servers_cache(self, updating_cache=False):
if self._bypass_api_service:
try:
api_servers_cache = [k8s_config.server] if updating_cache else self.api_servers_cache
self._api_servers_cache = self._get_api_servers(api_servers_cache)
if updating_cache:
self.pool_manager.clear()
except k8s_client.rest.ApiException: # 403 Permission denied
logger.warning("Kubernetes RBAC doesn't allow GET access to the 'kubernetes' "
"endpoint in the 'default' namespace. Disabling 'bypass_api_service'.")
self._bypass_api_service = False
self._api_servers_cache = [k8s_config.server]
if not updating_cache:
self.pool_manager.clear()
except K8sConnectionFailed:
if updating_cache:
raise K8sException("Could not get the list of K8s API server nodes")
return
else:
self._api_servers_cache = [k8s_config.server]
if self._base_uri not in self._api_servers_cache:
self.set_base_uri(self._api_servers_cache[0])
self._api_servers_cache_updated = time.time()
def refresh_api_servers_cache(self):
if self._bypass_api_service and time.time() - self._api_servers_cache_updated > self._api_servers_cache_ttl:
self._refresh_api_servers_cache()
def _load_api_servers_cache(self):
self._update_api_servers_cache = True
self._refresh_api_servers_cache(True)
self._update_api_servers_cache = False
def _calculate_timeouts(self, api_servers, timeout=None):
"""Calculate a request timeout and number of retries per single K8s API server node.
            If the timeout per node is too small (less than one second) we reduce the number of nodes.
            For a cluster with only one API server node we attempt one retry.
            No retries for clusters with 2 or more API server nodes; we rely on switching to a different node instead."""
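            # e.g. with the default 10s read timeout: 3 reachable nodes -> (3, ~3.3s, 0 retries),
            # a single node -> (1, 5.0s, 1 retry).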
per_node_timeout = timeout = float(timeout or self._read_timeout)
max_retries = 3 - min(api_servers, 2)
per_node_retries = 1
min_timeout = 1.0
while api_servers > 0:
per_node_timeout = float(timeout) / api_servers
if per_node_timeout >= min_timeout:
# for small clusters we will try to do more than one try on every node
while per_node_retries < max_retries and per_node_timeout / (per_node_retries + 1) >= min_timeout:
per_node_retries += 1
per_node_timeout /= per_node_retries
break
# if the timeout per one node is to small try to reduce number of nodes
api_servers -= 1
max_retries = 1
return api_servers, per_node_timeout, per_node_retries - 1
def _do_http_request(self, retry, api_servers_cache, method, path, **kwargs):
some_request_failed = False
for i, base_uri in enumerate(api_servers_cache):
if i > 0:
logger.info('Retrying on %s', base_uri)
try:
response = self.pool_manager.request(method, base_uri + path, **kwargs)
if some_request_failed:
self.set_base_uri(base_uri)
self._refresh_api_servers_cache()
return response
except (HTTPError, HTTPException, socket.error, socket.timeout) as e:
self.pool_manager.clear()
if not retry:
# switch to the next node if request failed and retry is not allowed
if i + 1 < len(api_servers_cache):
self.set_base_uri(api_servers_cache[i + 1])
raise K8sException('{0} {1} request failed'.format(method, path))
logger.error('Request to server %s failed: %r', base_uri, e)
some_request_failed = True
raise K8sConnectionFailed('No more API server nodes in the cluster')
def request(self, retry, method, path, timeout=None, **kwargs):
if self._update_api_servers_cache:
self._load_api_servers_cache()
api_servers_cache = self.api_servers_cache
api_servers = len(api_servers_cache)
if timeout:
if isinstance(timeout, six.integer_types + (float,)):
timeout = urllib3.Timeout(total=timeout)
elif isinstance(timeout, tuple) and len(timeout) == 2:
timeout = urllib3.Timeout(connect=timeout[0], read=timeout[1])
retries = 0
else:
_, timeout, retries = self._calculate_timeouts(api_servers)
timeout = urllib3.Timeout(connect=max(1, timeout/2.0), total=timeout)
kwargs.update(retries=retries, timeout=timeout)
while True:
try:
return self._do_http_request(retry, api_servers_cache, method, path, **kwargs)
except K8sConnectionFailed as ex:
try:
self._load_api_servers_cache()
api_servers_cache = self.api_servers_cache
                        api_servers = len(api_servers_cache)
except Exception as e:
logger.debug('Failed to update list of K8s master nodes: %r', e)
sleeptime = retry.sleeptime
remaining_time = retry.stoptime - sleeptime - time.time()
nodes, timeout, retries = self._calculate_timeouts(api_servers, remaining_time)
if nodes == 0:
self._update_api_servers_cache = True
raise ex
retry.sleep_func(sleeptime)
retry.update_delay()
# We still have some time left. Partially reduce `api_servers_cache` and retry request
kwargs.update(timeout=urllib3.Timeout(connect=max(1, timeout/2.0), total=timeout), retries=retries)
api_servers_cache = api_servers_cache[:nodes]
def call_api(self, method, path, headers=None, body=None, _retry=None,
_preload_content=True, _request_timeout=None, **kwargs):
headers = self._make_headers(headers)
fields = {to_camel_case(k): v for k, v in kwargs.items()} # resource_version => resourceVersion
body = json.dumps(body, default=lambda o: o.to_dict()) if body is not None else None
response = self.request(_retry, method, self._API_URL_PREFIX + path, headers=headers, fields=fields,
body=body, preload_content=_preload_content, timeout=_request_timeout)
return self._handle_server_response(response, _preload_content)
class CoreV1Api(object):
def __init__(self, api_client=None):
self._api_client = api_client or k8s_client.ApiClient()
def __getattr__(self, func): # `func` name pattern: (action)_namespaced_(kind)
action, kind = func.split('_namespaced_') # (read|list|create|patch|replace|delete|delete_collection)
kind = kind.replace('_', '') + ('s' * int(kind[-1] != 's')) # plural, single word
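            # e.g. read_namespaced_pod('mypod', 'default') issues
            #   GET /api/v1/namespaces/default/pods/mypod
            # and list_namespaced_config_map('default') issues
            #   GET /api/v1/namespaces/default/configmaps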
def wrapper(*args, **kwargs):
method = {'read': 'GET', 'list': 'GET', 'create': 'POST',
'replace': 'PUT'}.get(action, action.split('_')[0]).upper()
if action == 'create' or len(args) == 1: # namespace is a first argument and name in not in arguments
path = '/'.join([args[0], kind])
else: # name, namespace followed by optional body
path = '/'.join([args[1], kind, args[0]])
headers = {'Content-Type': 'application/strategic-merge-patch+json'} if action == 'patch' else {}
if len(args) == 3: # name, namespace, body
body = args[2]
elif action == 'create': # namespace, body
body = args[1]
elif action == 'delete': # name, namespace
body = kwargs.pop('body', None)
else:
body = None
return self._api_client.call_api(method, path, headers, body, **kwargs)
return wrapper
class _K8sObjectTemplate(K8sObject):
"""The template for objects which we create locally, e.g. k8s_client.V1ObjectMeta & co"""
def __init__(self, **kwargs):
self._dict = {to_camel_case(k): v for k, v in kwargs.items()}
def __init__(self):
self.__cls_cache = {}
self.__cls_lock = Lock()
def __getattr__(self, name):
with self.__cls_lock:
if name not in self.__cls_cache:
self.__cls_cache[name] = type(name, (self._K8sObjectTemplate,), {})
return self.__cls_cache[name]
k8s_client = K8sClient()
k8s_config = K8sConfig()
class KubernetesRetriableException(k8s_client.rest.ApiException):
def __init__(self, orig):
super(KubernetesRetriableException, self).__init__(orig.status, orig.reason)
self.body = orig.body
self.headers = orig.headers
@property
def sleeptime(self):
try:
return int(self.headers['retry-after'])
except Exception:
return None
class CoreV1ApiProxy(object):
def __init__(self, use_endpoints=False, bypass_api_service=False):
self._api_client = k8s_client.ApiClient(bypass_api_service)
self._core_v1_api = k8s_client.CoreV1Api(self._api_client)
self._use_endpoints = bool(use_endpoints)
def configure_timeouts(self, loop_wait, retry_timeout, ttl):
        # Normally we should receive something from the socket every loop_wait seconds.
        # If we didn't receive anything after loop_wait + retry_timeout seconds it is time
        # to start worrying (send keepalive messages). Finally, the connection should be
        # considered dead if we received nothing from the socket after ttl seconds.
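        # Worked example (illustrative): with loop_wait=10, retry_timeout=10 and ttl=30 the
        # socket starts sending keepalive probes after 10 + 10 = 20 idle seconds, and the
        # connection is treated as dead once nothing has arrived for 30 seconds.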
self._api_client.pool_manager.connection_pool_kw['socket_options'] = \
list(keepalive_socket_options(ttl, int(loop_wait + retry_timeout)))
self._api_client.set_read_timeout(retry_timeout)
self._api_client.set_api_servers_cache_ttl(loop_wait)
def refresh_api_servers_cache(self):
self._api_client.refresh_api_servers_cache()
def __getattr__(self, func):
if func.endswith('_kind'):
func = func[:-4] + ('endpoints' if self._use_endpoints else 'config_map')
def wrapper(*args, **kwargs):
try:
return getattr(self._core_v1_api, func)(*args, **kwargs)
except k8s_client.rest.ApiException as e:
                if e.status in (500, 503, 504) or e.headers and 'retry-after' in e.headers:  # server-side/throttling errors are retriable
raise KubernetesRetriableException(e)
raise
return wrapper
@property
def use_endpoints(self):
return self._use_endpoints
def catch_kubernetes_errors(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except k8s_client.rest.ApiException as e:
if e.status == 403:
logger.exception('Permission denied')
elif e.status != 409: # Object exists or conflict in resource_version
logger.exception('Unexpected error from Kubernetes API')
return False
except (RetryFailedError, K8sException):
return False
return wrapper
class ObjectCache(Thread):
def __init__(self, dcs, func, retry, condition, name=None):
Thread.__init__(self)
self.daemon = True
self._dcs = dcs
self._func = func
self._retry = retry
self._condition = condition
self._name = name # name of this pod
self._is_ready = False
self._object_cache = {}
self._object_cache_lock = Lock()
self._annotations_map = {self._dcs.leader_path: self._dcs._LEADER, self._dcs.config_path: self._dcs._CONFIG}
self.start()
def _list(self):
try:
return self._func(_retry=self._retry.copy())
except Exception:
time.sleep(1)
raise
def _watch(self, resource_version):
return self._func(_request_timeout=(self._retry.deadline, Timeout.DEFAULT_TIMEOUT),
_preload_content=False, watch=True, resource_version=resource_version)
def set(self, name, value):
with self._object_cache_lock:
old_value = self._object_cache.get(name)
ret = not old_value or int(old_value.metadata.resource_version) < int(value.metadata.resource_version)
if ret:
self._object_cache[name] = value
return ret, old_value
def delete(self, name, resource_version):
with self._object_cache_lock:
old_value = self._object_cache.get(name)
ret = old_value and int(old_value.metadata.resource_version) < int(resource_version)
if ret:
del self._object_cache[name]
return not old_value or ret, old_value
def copy(self):
with self._object_cache_lock:
return self._object_cache.copy()
def get(self, name):
with self._object_cache_lock:
return self._object_cache.get(name)
def _build_cache(self):
objects = self._list()
return_type = 'V1' + objects.kind[:-4]
with self._object_cache_lock:
self._object_cache = {item.metadata.name: item for item in objects.items}
with self._condition:
self._is_ready = True
self._condition.notify()
response = self._watch(objects.metadata.resource_version)
try:
for event in iter_response_objects(response):
obj = event['object']
if obj.get('code') == 410:
break
ev_type = event['type']
name = obj['metadata']['name']
if ev_type in ('ADDED', 'MODIFIED'):
obj = K8sObject(obj)
success, old_value = self.set(name, obj)
if success:
new_value = (obj.metadata.annotations or {}).get(self._annotations_map.get(name))
elif ev_type == 'DELETED':
success, old_value = self.delete(name, obj['metadata']['resourceVersion'])
new_value = None
else:
logger.warning('Unexpected event type: %s', ev_type)
continue
if success and return_type != 'V1Pod':
if old_value:
old_value = (old_value.metadata.annotations or {}).get(self._annotations_map.get(name))
value_changed = old_value != new_value and \
(name != self._dcs.config_path or old_value is not None and new_value is not None)
if value_changed:
logger.debug('%s changed from %s to %s', name, old_value, new_value)
# Do not wake up HA loop if we run as leader and received leader object update event
if value_changed or name == self._dcs.leader_path and self._name != new_value:
self._dcs.event.set()
finally:
with self._condition:
self._is_ready = False
response.close()
response.release_conn()
def run(self):
while True:
try:
self._build_cache()
except Exception as e:
with self._condition:
self._is_ready = False
logger.error('ObjectCache.run %r', e)
def is_ready(self):
"""Must be called only when holding the lock on `_condition`"""
return self._is_ready
class Kubernetes(AbstractDCS):
def __init__(self, config):
self._labels = config['labels']
self._labels[config.get('scope_label', 'cluster-name')] = config['scope']
self._label_selector = ','.join('{0}={1}'.format(k, v) for k, v in self._labels.items())
self._namespace = config.get('namespace') or 'default'
self._role_label = config.get('role_label', 'role')
config['namespace'] = ''
super(Kubernetes, self).__init__(config)
self._retry = Retry(deadline=config['retry_timeout'], max_delay=1, max_tries=-1,
retry_exceptions=KubernetesRetriableException)
self._ttl = None
try:
k8s_config.load_incluster_config()
except k8s_config.ConfigException:
k8s_config.load_kube_config(context=config.get('context', 'local'))
self.__my_pod = None
self.__ips = [] if config.get('patronictl') else [config.get('pod_ip')]
self.__ports = []
for p in config.get('ports', [{}]):
port = {'port': int(p.get('port', '5432'))}
port.update({n: p[n] for n in ('name', 'protocol') if p.get(n)})
self.__ports.append(k8s_client.V1EndpointPort(**port))
bypass_api_service = not config.get('patronictl') and config.get('bypass_api_service')
self._api = CoreV1ApiProxy(config.get('use_endpoints'), bypass_api_service)
self._should_create_config_service = self._api.use_endpoints
self.reload_config(config)
# leader_observed_record, leader_resource_version, and leader_observed_time are used only for leader race!
self._leader_observed_record = {}
self._leader_observed_time = None
self._leader_resource_version = None
self.__do_not_watch = False
self._condition = Condition()
pods_func = functools.partial(self._api.list_namespaced_pod, self._namespace,
label_selector=self._label_selector)
self._pods = ObjectCache(self, pods_func, self._retry, self._condition)
kinds_func = functools.partial(self._api.list_namespaced_kind, self._namespace,
label_selector=self._label_selector)
self._kinds = ObjectCache(self, kinds_func, self._retry, self._condition, self._name)
def retry(self, *args, **kwargs):
retry = self._retry.copy()
kwargs['_retry'] = retry
return retry(*args, **kwargs)
def client_path(self, path):
return super(Kubernetes, self).client_path(path)[1:].replace('/', '-')
@property
def leader_path(self):
return self._base_path[1:] if self._api.use_endpoints else super(Kubernetes, self).leader_path
def set_ttl(self, ttl):
ttl = int(ttl)
self.__do_not_watch = self._ttl != ttl
self._ttl = ttl
@property
def ttl(self):
return self._ttl
def set_retry_timeout(self, retry_timeout):
self._retry.deadline = retry_timeout
def reload_config(self, config):
super(Kubernetes, self).reload_config(config)
self._api.configure_timeouts(self.loop_wait, self._retry.deadline, self.ttl)
@staticmethod
def member(pod):
annotations = pod.metadata.annotations or {}
member = Member.from_node(pod.metadata.resource_version, pod.metadata.name, None, annotations.get('status', ''))
member.data['pod_labels'] = pod.metadata.labels
return member
def _wait_caches(self, stop_time):
while not (self._pods.is_ready() and self._kinds.is_ready()):
timeout = stop_time - time.time()
if timeout <= 0:
raise RetryFailedError('Exceeded retry deadline')
self._condition.wait(timeout)
def _load_cluster(self):
stop_time = time.time() + self._retry.deadline
self._api.refresh_api_servers_cache()
try:
with self._condition:
self._wait_caches(stop_time)
members = [self.member(pod) for pod in self._pods.copy().values()]
nodes = self._kinds.copy()
config = nodes.get(self.config_path)
metadata = config and config.metadata
annotations = metadata and metadata.annotations or {}
# get initialize flag
initialize = annotations.get(self._INITIALIZE)
# get global dynamic configuration
config = ClusterConfig.from_node(metadata and metadata.resource_version,
annotations.get(self._CONFIG) or '{}',
metadata.resource_version if self._CONFIG in annotations else 0)
# get timeline history
history = TimelineHistory.from_node(metadata and metadata.resource_version,
annotations.get(self._HISTORY) or '[]')
leader = nodes.get(self.leader_path)
metadata = leader and leader.metadata
self._leader_resource_version = metadata.resource_version if metadata else None
annotations = metadata and metadata.annotations or {}
# get last known leader lsn
last_lsn = annotations.get(self._OPTIME)
try:
last_lsn = 0 if last_lsn is None else int(last_lsn)
except Exception:
last_lsn = 0
# get permanent slots state (confirmed_flush_lsn)
slots = annotations.get('slots')
try:
slots = slots and json.loads(slots)
except Exception:
slots = None
# get leader
leader_record = {n: annotations.get(n) for n in (self._LEADER, 'acquireTime',
'ttl', 'renewTime', 'transitions') if n in annotations}
if (leader_record or self._leader_observed_record) and leader_record != self._leader_observed_record:
self._leader_observed_record = leader_record
self._leader_observed_time = time.time()
leader = leader_record.get(self._LEADER)
try:
ttl = int(leader_record.get('ttl')) or self._ttl
except (TypeError, ValueError):
ttl = self._ttl
if not metadata or not self._leader_observed_time or self._leader_observed_time + ttl < time.time():
leader = None
if metadata:
member = Member(-1, leader, None, {})
member = ([m for m in members if m.name == leader] or [member])[0]
leader = Leader(metadata.resource_version, None, member)
# failover key
failover = nodes.get(self.failover_path)
metadata = failover and failover.metadata
failover = Failover.from_node(metadata and metadata.resource_version,
metadata and (metadata.annotations or {}).copy())
# get synchronization state
sync = nodes.get(self.sync_path)
metadata = sync and sync.metadata
sync = SyncState.from_node(metadata and metadata.resource_version, metadata and metadata.annotations)
return Cluster(initialize, config, leader, last_lsn, members, failover, sync, history, slots)
except Exception:
logger.exception('get_cluster')
raise KubernetesError('Kubernetes API is not responding properly')
@staticmethod
def compare_ports(p1, p2):
return p1.name == p2.name and p1.port == p2.port and (p1.protocol or 'TCP') == (p2.protocol or 'TCP')
@staticmethod
def subsets_changed(last_observed_subsets, ip, ports):
"""
>>> Kubernetes.subsets_changed([], None, [])
True
>>> ip = '1.2.3.4'
>>> a = [k8s_client.V1EndpointAddress(ip=ip)]
>>> s = [k8s_client.V1EndpointSubset(addresses=a)]
>>> Kubernetes.subsets_changed(s, '1.2.3.5', [])
True
>>> s = [k8s_client.V1EndpointSubset(addresses=a, ports=[k8s_client.V1EndpointPort(protocol='TCP', port=1)])]
>>> Kubernetes.subsets_changed(s, '1.2.3.4', [k8s_client.V1EndpointPort(port=5432)])
True
>>> p1 = k8s_client.V1EndpointPort(name='port1', port=1)
>>> p2 = k8s_client.V1EndpointPort(name='port2', port=2)
>>> p3 = k8s_client.V1EndpointPort(name='port3', port=3)
>>> s = [k8s_client.V1EndpointSubset(addresses=a, ports=[p1, p2])]
>>> Kubernetes.subsets_changed(s, ip, [p2, p3])
True
>>> s2 = [k8s_client.V1EndpointSubset(addresses=a, ports=[p2, p1])]
        >>> Kubernetes.subsets_changed(s2, ip, [p1, p2])
False
"""
if len(last_observed_subsets) != 1:
return True
if len(last_observed_subsets[0].addresses or []) != 1 or \
last_observed_subsets[0].addresses[0].ip != ip or \
len(last_observed_subsets[0].ports) != len(ports):
return True
if len(ports) == 1:
return not Kubernetes.compare_ports(last_observed_subsets[0].ports[0], ports[0])
observed_ports = {p.name: p for p in last_observed_subsets[0].ports}
for p in ports:
if p.name not in observed_ports or not Kubernetes.compare_ports(p, observed_ports.pop(p.name)):
return True
return False
def __target_ref(self, leader_ip, latest_subsets, pod):
# we want to re-use existing target_ref if possible
for subset in latest_subsets:
for address in subset.addresses or []:
if address.ip == leader_ip and address.target_ref and address.target_ref.name == self._name:
return address.target_ref
return k8s_client.V1ObjectReference(kind='Pod', uid=pod.metadata.uid, namespace=self._namespace,
name=self._name, resource_version=pod.metadata.resource_version)
def _map_subsets(self, endpoints, ips):
leader = self._kinds.get(self.leader_path)
latest_subsets = leader and leader.subsets or []
if not ips:
# We want to have subsets empty
if latest_subsets:
endpoints['subsets'] = []
return
pod = self._pods.get(self._name)
leader_ip = ips[0] or pod and pod.status.pod_ip
# don't touch subsets if our (leader) ip is unknown or subsets is valid
if leader_ip and self.subsets_changed(latest_subsets, leader_ip, self.__ports):
kwargs = {'hostname': pod.spec.hostname, 'node_name': pod.spec.node_name,
'target_ref': self.__target_ref(leader_ip, latest_subsets, pod)} if pod else {}
address = k8s_client.V1EndpointAddress(ip=leader_ip, **kwargs)
endpoints['subsets'] = [k8s_client.V1EndpointSubset(addresses=[address], ports=self.__ports)]
def _patch_or_create(self, name, annotations, resource_version=None, patch=False, retry=None, ips=None):
metadata = {'namespace': self._namespace, 'name': name, 'labels': self._labels, 'annotations': annotations}
if patch or resource_version:
if resource_version is not None:
metadata['resource_version'] = resource_version
func = functools.partial(self._api.patch_namespaced_kind, name)
else:
func = functools.partial(self._api.create_namespaced_kind)
# skip annotations with null values
metadata['annotations'] = {k: v for k, v in metadata['annotations'].items() if v is not None}
metadata = k8s_client.V1ObjectMeta(**metadata)
if ips is not None and self._api.use_endpoints:
endpoints = {'metadata': metadata}
self._map_subsets(endpoints, ips)
body = k8s_client.V1Endpoints(**endpoints)
else:
body = k8s_client.V1ConfigMap(metadata=metadata)
ret = retry(func, self._namespace, body) if retry else func(self._namespace, body)
if ret:
self._kinds.set(name, ret)
return ret
@catch_kubernetes_errors
def patch_or_create(self, name, annotations, resource_version=None, patch=False, retry=True, ips=None):
if retry is True:
retry = self.retry
return self._patch_or_create(name, annotations, resource_version, patch, retry, ips)
def patch_or_create_config(self, annotations, resource_version=None, patch=False, retry=True):
# SCOPE-config endpoint requires corresponding service otherwise it might be "cleaned" by k8s master
if self._api.use_endpoints and not patch and not resource_version:
self._should_create_config_service = True
self._create_config_service()
return self.patch_or_create(self.config_path, annotations, resource_version, patch, retry)
def _create_config_service(self):
metadata = k8s_client.V1ObjectMeta(namespace=self._namespace, name=self.config_path, labels=self._labels)
body = k8s_client.V1Service(metadata=metadata, spec=k8s_client.V1ServiceSpec(cluster_ip='None'))
try:
if not self._api.create_namespaced_service(self._namespace, body):
return
except Exception as e:
if not isinstance(e, k8s_client.rest.ApiException) or e.status != 409: # Service already exists
return logger.exception('create_config_service failed')
self._should_create_config_service = False
def _write_leader_optime(self, last_lsn):
"""Unused"""
def _write_status(self, value):
"""Unused"""
def _update_leader(self):
"""Unused"""
def _update_leader_with_retry(self, annotations, resource_version, ips):
retry = self._retry.copy()
def _retry(*args, **kwargs):
kwargs['_retry'] = retry
return retry(*args, **kwargs)
try:
return self._patch_or_create(self.leader_path, annotations, resource_version, ips=ips, retry=_retry)
except k8s_client.rest.ApiException as e:
if e.status == 409:
logger.warning('Concurrent update of %s', self.leader_path)
else:
logger.exception('Permission denied' if e.status == 403 else 'Unexpected error from Kubernetes API')
return False
except (RetryFailedError, K8sException):
return False
retry.deadline = retry.stoptime - time.time()
if retry.deadline < 1:
return False
# Try to get the latest version directly from K8s API instead of relying on async cache
try:
kind = retry(self._api.read_namespaced_kind, self.leader_path, self._namespace)
except Exception as e:
logger.error('Failed to get the leader object "%s": %r', self.leader_path, e)
return False
self._kinds.set(self.leader_path, kind)
retry.deadline = retry.stoptime - time.time()
if retry.deadline < 0.5:
return False
kind_annotations = kind and kind.metadata.annotations or {}
kind_resource_version = kind and kind.metadata.resource_version
        # There is a different leader, or the resource_version in the cache didn't change
if kind and (kind_annotations.get(self._LEADER) != self._name or kind_resource_version == resource_version):
return False
return self.patch_or_create(self.leader_path, annotations, kind_resource_version, ips=ips, retry=_retry)
def update_leader(self, last_lsn, slots=None):
kind = self._kinds.get(self.leader_path)
kind_annotations = kind and kind.metadata.annotations or {}
if kind and kind_annotations.get(self._LEADER) != self._name:
return False
now = datetime.datetime.now(tzutc).isoformat()
leader_observed_record = kind_annotations or self._leader_observed_record
annotations = {self._LEADER: self._name, 'ttl': str(self._ttl), 'renewTime': now,
'acquireTime': leader_observed_record.get('acquireTime') or now,
'transitions': leader_observed_record.get('transitions') or '0'}
if last_lsn:
annotations[self._OPTIME] = str(last_lsn)
annotations['slots'] = json.dumps(slots) if slots else None
resource_version = kind and kind.metadata.resource_version
return self._update_leader_with_retry(annotations, resource_version, self.__ips)
def attempt_to_acquire_leader(self, permanent=False):
now = datetime.datetime.now(tzutc).isoformat()
annotations = {self._LEADER: self._name, 'ttl': str(sys.maxsize if permanent else self._ttl),
'renewTime': now, 'acquireTime': now, 'transitions': '0'}
if self._leader_observed_record:
try:
transitions = int(self._leader_observed_record.get('transitions'))
except (TypeError, ValueError):
transitions = 0
if self._leader_observed_record.get(self._LEADER) != self._name:
transitions += 1
else:
annotations['acquireTime'] = self._leader_observed_record.get('acquireTime') or now
annotations['transitions'] = str(transitions)
ips = [] if self._api.use_endpoints else None
ret = self.patch_or_create(self.leader_path, annotations, self._leader_resource_version, ips=ips)
if not ret:
logger.info('Could not take out TTL lock')
return ret
def take_leader(self):
return self.attempt_to_acquire_leader()
def set_failover_value(self, value, index=None):
"""Unused"""
def manual_failover(self, leader, candidate, scheduled_at=None, index=None):
annotations = {'leader': leader or None, 'member': candidate or None,
'scheduled_at': scheduled_at and scheduled_at.isoformat()}
patch = bool(self.cluster and isinstance(self.cluster.failover, Failover) and self.cluster.failover.index)
return self.patch_or_create(self.failover_path, annotations, index, bool(index or patch), False)
@property
def _config_resource_version(self):
config = self._kinds.get(self.config_path)
return config and config.metadata.resource_version
def set_config_value(self, value, index=None):
return self.patch_or_create_config({self._CONFIG: value}, index, bool(self._config_resource_version), False)
@catch_kubernetes_errors
def touch_member(self, data, permanent=False):
cluster = self.cluster
if cluster and cluster.leader and cluster.leader.name == self._name:
role = 'promoted' if data['role'] in ('replica', 'promoted') else 'master'
elif data['state'] == 'running' and data['role'] != 'master':
role = data['role']
else:
role = None
member = cluster and cluster.get_member(self._name, fallback_to_leader=False)
pod_labels = member and member.data.pop('pod_labels', None)
ret = pod_labels is not None and pod_labels.get(self._role_label) == role and deep_compare(data, member.data)
if not ret:
metadata = {'namespace': self._namespace, 'name': self._name, 'labels': {self._role_label: role},
'annotations': {'status': json.dumps(data, separators=(',', ':'))}}
body = k8s_client.V1Pod(metadata=k8s_client.V1ObjectMeta(**metadata))
ret = self._api.patch_namespaced_pod(self._name, self._namespace, body)
if ret:
self._pods.set(self._name, ret)
if self._should_create_config_service:
self._create_config_service()
return ret
def initialize(self, create_new=True, sysid=""):
cluster = self.cluster
resource_version = cluster.config.index if cluster and cluster.config and cluster.config.index else None
return self.patch_or_create_config({self._INITIALIZE: sysid}, resource_version)
def _delete_leader(self):
"""Unused"""
def delete_leader(self, last_lsn=None):
kind = self._kinds.get(self.leader_path)
if kind and (kind.metadata.annotations or {}).get(self._LEADER) == self._name:
annotations = {self._LEADER: None}
if last_lsn:
annotations[self._OPTIME] = last_lsn
self.patch_or_create(self.leader_path, annotations, kind.metadata.resource_version, True, False, [])
self.reset_cluster()
def cancel_initialization(self):
self.patch_or_create_config({self._INITIALIZE: None}, self._config_resource_version, True)
@catch_kubernetes_errors
def delete_cluster(self):
self.retry(self._api.delete_collection_namespaced_kind, self._namespace, label_selector=self._label_selector)
def set_history_value(self, value):
return self.patch_or_create_config({self._HISTORY: value}, None, bool(self._config_resource_version), False)
def set_sync_state_value(self, value, index=None):
"""Unused"""
def write_sync_state(self, leader, sync_standby, index=None):
return self.patch_or_create(self.sync_path, self.sync_state(leader, sync_standby), index, False)
def delete_sync_state(self, index=None):
return self.write_sync_state(None, None, index)
def watch(self, leader_index, timeout):
if self.__do_not_watch:
self.__do_not_watch = False
return True
try:
return super(Kubernetes, self).watch(None, timeout + 0.5)
finally:
self.event.clear()
|
the-stack_0_20377 | # pylint: disable=no-member, invalid-name, redefined-outer-name, protected-access, too-many-public-methods
from typing import Dict, Tuple
import numpy as np
import pandas as pd
import pytest
from aesara.tensor.subtensor import AdvancedIncSubtensor, AdvancedIncSubtensor1
from arviz import InferenceData
from arviz.tests.helpers import check_multiple_attrs
from numpy import ma
import pymc as pm
from pymc.backends.arviz import (
InferenceDataConverter,
predictions_to_inference_data,
to_inference_data,
)
@pytest.fixture(scope="module")
def eight_schools_params():
"""Share setup for eight schools."""
return {
"J": 8,
"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
"sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]),
}
@pytest.fixture(scope="module")
def draws():
"""Share default draw count."""
return 500
@pytest.fixture(scope="module")
def chains():
"""Share default chain count."""
return 2
class TestDataPyMC:
class Data:
def __init__(self, model, trace):
self.model = model
self.obj = trace
@pytest.fixture(scope="class")
def data(self, eight_schools_params, draws, chains):
with pm.Model() as model:
mu = pm.Normal("mu", mu=0, sd=5)
tau = pm.HalfCauchy("tau", beta=5)
eta = pm.Normal("eta", mu=0, sd=1, size=eight_schools_params["J"])
theta = pm.Deterministic("theta", mu + tau * eta)
pm.Normal(
"obs",
mu=theta,
sd=eight_schools_params["sigma"],
observed=eight_schools_params["y"],
)
trace = pm.sample(draws, chains=chains, return_inferencedata=False)
return self.Data(model, trace)
def get_inference_data(self, data, eight_schools_params):
with data.model:
prior = pm.sample_prior_predictive(return_inferencedata=False)
posterior_predictive = pm.sample_posterior_predictive(
data.obj, return_inferencedata=False
)
return to_inference_data(
trace=data.obj,
prior=prior,
posterior_predictive=posterior_predictive,
coords={"school": np.arange(eight_schools_params["J"])},
dims={"theta": ["school"], "eta": ["school"]},
model=data.model,
)
def get_predictions_inference_data(
self, data, eight_schools_params, inplace
) -> Tuple[InferenceData, Dict[str, np.ndarray]]:
with data.model:
prior = pm.sample_prior_predictive(return_inferencedata=False)
posterior_predictive = pm.sample_posterior_predictive(
data.obj, keep_size=True, return_inferencedata=False
)
idata = to_inference_data(
trace=data.obj,
prior=prior,
coords={"school": np.arange(eight_schools_params["J"])},
dims={"theta": ["school"], "eta": ["school"]},
)
assert isinstance(idata, InferenceData)
extended = predictions_to_inference_data(
posterior_predictive, idata_orig=idata, inplace=inplace
)
assert isinstance(extended, InferenceData)
assert (id(idata) == id(extended)) == inplace
return (extended, posterior_predictive)
def make_predictions_inference_data(
self, data, eight_schools_params
) -> Tuple[InferenceData, Dict[str, np.ndarray]]:
with data.model:
posterior_predictive = pm.sample_posterior_predictive(
data.obj, keep_size=True, return_inferencedata=False
)
idata = predictions_to_inference_data(
posterior_predictive,
posterior_trace=data.obj,
coords={"school": np.arange(eight_schools_params["J"])},
dims={"theta": ["school"], "eta": ["school"]},
)
assert isinstance(idata, InferenceData)
return idata, posterior_predictive
def test_to_idata(self, data, eight_schools_params, chains, draws):
inference_data = self.get_inference_data(data, eight_schools_params)
test_dict = {
"posterior": ["mu", "tau", "eta", "theta"],
"sample_stats": ["diverging", "lp", "~log_likelihood"],
"log_likelihood": ["obs"],
"posterior_predictive": ["obs"],
"prior": ["mu", "tau", "eta", "theta"],
"prior_predictive": ["obs"],
"observed_data": ["obs"],
}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
chains = inference_data.posterior.dims["chain"]
draws = inference_data.posterior.dims["draw"]
obs = inference_data.observed_data["obs"]
assert inference_data.log_likelihood["obs"].shape == (chains, draws) + obs.shape
def test_predictions_to_idata(self, data, eight_schools_params):
"Test that we can add predictions to a previously-existing InferenceData."
test_dict = {
"posterior": ["mu", "tau", "eta", "theta"],
"sample_stats": ["diverging", "lp"],
"log_likelihood": ["obs"],
"predictions": ["obs"],
"prior": ["mu", "tau", "eta", "theta"],
"observed_data": ["obs"],
}
# check adding non-destructively
inference_data, _ = self.get_predictions_inference_data(data, eight_schools_params, False)
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
for key, ivalues in inference_data.predictions.items():
assert (
len(ivalues["chain"]) == inference_data.posterior.dims["chain"]
) # same chains as in posterior
# check adding in place
inference_data, posterior_predictive = self.get_predictions_inference_data(
data, eight_schools_params, True
)
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
for key, ivalues in inference_data.predictions.items():
assert (
len(ivalues["chain"]) == inference_data.posterior.dims["chain"]
) # same chains as in posterior
def test_predictions_to_idata_new(self, data, eight_schools_params):
# check creating new
inference_data, posterior_predictive = self.make_predictions_inference_data(
data, eight_schools_params
)
test_dict = {
"posterior": ["mu", "tau", "eta", "theta"],
"predictions": ["obs"],
"~observed_data": "",
}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
for key, values in posterior_predictive.items():
ivalues = inference_data.predictions[key]
assert (len(ivalues["chain"]) == 2) and (len(ivalues["draw"]) == 500)
def test_posterior_predictive_keep_size(self, data, chains, draws, eight_schools_params):
with data.model:
posterior_predictive = pm.sample_posterior_predictive(
data.obj, keep_size=True, return_inferencedata=False
)
inference_data = to_inference_data(
trace=data.obj,
posterior_predictive=posterior_predictive,
coords={"school": np.arange(eight_schools_params["J"])},
dims={"theta": ["school"], "eta": ["school"]},
)
shape = inference_data.posterior_predictive.obs.shape
assert np.all(
[obs_s == s for obs_s, s in zip(shape, (chains, draws, eight_schools_params["J"]))]
)
def test_posterior_predictive_warning(self, data, eight_schools_params, caplog):
with data.model:
posterior_predictive = pm.sample_posterior_predictive(
data.obj, 370, return_inferencedata=False, keep_size=False
)
with pytest.warns(UserWarning, match="shape of variables"):
inference_data = to_inference_data(
trace=data.obj,
posterior_predictive=posterior_predictive,
coords={"school": np.arange(eight_schools_params["J"])},
dims={"theta": ["school"], "eta": ["school"]},
)
shape = inference_data.posterior_predictive.obs.shape
assert np.all([obs_s == s for obs_s, s in zip(shape, (1, 370, eight_schools_params["J"]))])
def test_posterior_predictive_thinned(self, data):
with data.model:
idata = pm.sample(tune=5, draws=20, chains=2, return_inferencedata=True)
thinned_idata = idata.sel(draw=slice(None, None, 4))
idata.extend(pm.sample_posterior_predictive(thinned_idata))
test_dict = {
"posterior": ["mu", "tau", "eta", "theta"],
"sample_stats": ["diverging", "lp", "~log_likelihood"],
"log_likelihood": ["obs"],
"posterior_predictive": ["obs"],
"observed_data": ["obs"],
}
fails = check_multiple_attrs(test_dict, idata)
assert not fails
assert idata.posterior.dims["chain"] == 2
assert idata.posterior.dims["draw"] == 20
assert idata.posterior_predictive.dims["chain"] == 2
assert idata.posterior_predictive.dims["draw"] == 5
@pytest.mark.parametrize("use_context", [True, False])
def test_autodetect_coords_from_model(self, use_context):
df_data = pd.DataFrame(columns=["date"]).set_index("date")
dates = pd.date_range(start="2020-05-01", end="2020-05-20")
for city, mu in {"Berlin": 15, "San Marino": 18, "Paris": 16}.items():
df_data[city] = np.random.normal(loc=mu, size=len(dates))
df_data.index = dates
df_data.index.name = "date"
coords = {"date": df_data.index, "city": df_data.columns}
with pm.Model(coords=coords) as model:
europe_mean = pm.Normal("europe_mean_temp", mu=15.0, sd=3.0)
city_offset = pm.Normal("city_offset", mu=0.0, sd=3.0, dims="city")
city_temperature = pm.Deterministic(
"city_temperature", europe_mean + city_offset, dims="city"
)
data_dims = ("date", "city")
data = pm.ConstantData("data", df_data, dims=data_dims)
_ = pm.Normal("likelihood", mu=city_temperature, sd=0.5, observed=data, dims=data_dims)
trace = pm.sample(
return_inferencedata=False,
compute_convergence_checks=False,
cores=1,
chains=1,
tune=20,
draws=30,
step=pm.Metropolis(),
)
if use_context:
idata = to_inference_data(trace=trace)
if not use_context:
idata = to_inference_data(trace=trace, model=model)
assert "city" in list(idata.posterior.dims)
assert "city" in list(idata.observed_data.dims)
assert "date" in list(idata.observed_data.dims)
np.testing.assert_array_equal(idata.posterior.coords["city"], coords["city"])
np.testing.assert_array_equal(idata.observed_data.coords["date"], coords["date"])
np.testing.assert_array_equal(idata.observed_data.coords["city"], coords["city"])
    def test_overwrite_model_coords_dims(self):
"""Check coords and dims from model object can be partially overwritten."""
dim1 = ["a", "b"]
new_dim1 = ["c", "d"]
coords = {"dim1": dim1, "dim2": ["c1", "c2"]}
x_data = np.arange(4).reshape((2, 2))
y = x_data + np.random.normal(size=(2, 2))
with pm.Model(coords=coords):
x = pm.ConstantData("x", x_data, dims=("dim1", "dim2"))
beta = pm.Normal("beta", 0, 1, dims="dim1")
_ = pm.Normal("obs", x * beta, 1, observed=y, dims=("dim1", "dim2"))
trace = pm.sample(100, tune=100, return_inferencedata=False)
idata1 = to_inference_data(trace)
idata2 = to_inference_data(trace, coords={"dim1": new_dim1}, dims={"beta": ["dim2"]})
test_dict = {"posterior": ["beta"], "observed_data": ["obs"], "constant_data": ["x"]}
fails1 = check_multiple_attrs(test_dict, idata1)
assert not fails1
fails2 = check_multiple_attrs(test_dict, idata2)
assert not fails2
assert "dim1" in list(idata1.posterior.beta.dims)
assert "dim2" in list(idata2.posterior.beta.dims)
assert np.all(idata1.constant_data.x.dim1.values == np.array(dim1))
assert np.all(idata1.constant_data.x.dim2.values == np.array(["c1", "c2"]))
assert np.all(idata2.constant_data.x.dim1.values == np.array(new_dim1))
assert np.all(idata2.constant_data.x.dim2.values == np.array(["c1", "c2"]))
def test_missing_data_model(self):
# source pymc/pymc/tests/test_missing.py
data = ma.masked_values([1, 2, -1, 4, -1], value=-1)
model = pm.Model()
with model:
x = pm.Normal("x", 1, 1)
y = pm.Normal("y", x, 1, observed=data)
inference_data = pm.sample(100, chains=2, return_inferencedata=True)
# make sure that data is really missing
assert "y_missing" in model.named_vars
test_dict = {
"posterior": ["x", "y_missing"],
"observed_data": ["y_observed"],
"log_likelihood": ["y_observed"],
}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
# The missing part of partial observed RVs is not included in log_likelihood
# See https://github.com/pymc-devs/pymc/issues/5255
assert inference_data.log_likelihood["y_observed"].shape == (2, 100, 3)
    @pytest.mark.xfail(reason="Multivariate partial observed RVs not implemented for V4")
@pytest.mark.xfail(reason="LKJCholeskyCov not refactored for v4")
def test_mv_missing_data_model(self):
data = ma.masked_values([[1, 2], [2, 2], [-1, 4], [2, -1], [-1, -1]], value=-1)
model = pm.Model()
with model:
mu = pm.Normal("mu", 0, 1, size=2)
sd_dist = pm.HalfNormal.dist(1.0)
chol, *_ = pm.LKJCholeskyCov("chol_cov", n=2, eta=1, sd_dist=sd_dist, compute_corr=True)
y = pm.MvNormal("y", mu=mu, chol=chol, observed=data)
inference_data = pm.sample(100, chains=2, return_inferencedata=True)
# make sure that data is really missing
assert isinstance(y.owner.op, (AdvancedIncSubtensor, AdvancedIncSubtensor1))
test_dict = {
"posterior": ["mu", "chol_cov"],
"observed_data": ["y"],
"log_likelihood": ["y"],
}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
@pytest.mark.parametrize("log_likelihood", [True, False, ["y1"]])
def test_multiple_observed_rv(self, log_likelihood):
y1_data = np.random.randn(10)
y2_data = np.random.randn(100)
with pm.Model():
x = pm.Normal("x", 1, 1)
pm.Normal("y1", x, 1, observed=y1_data)
pm.Normal("y2", x, 1, observed=y2_data)
inference_data = pm.sample(
100,
chains=2,
return_inferencedata=True,
idata_kwargs={"log_likelihood": log_likelihood},
)
test_dict = {
"posterior": ["x"],
"observed_data": ["y1", "y2"],
"log_likelihood": ["y1", "y2"],
"sample_stats": ["diverging", "lp", "~log_likelihood"],
}
if not log_likelihood:
test_dict.pop("log_likelihood")
test_dict["~log_likelihood"] = []
elif isinstance(log_likelihood, list):
test_dict["log_likelihood"] = ["y1", "~y2"]
assert inference_data.log_likelihood["y1"].shape == (2, 100, 10)
else:
assert inference_data.log_likelihood["y1"].shape == (2, 100, 10)
assert inference_data.log_likelihood["y2"].shape == (2, 100, 100)
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
def test_single_observation(self):
with pm.Model():
p = pm.Uniform("p", 0, 1)
pm.Binomial("w", p=p, n=2, observed=1)
inference_data = pm.sample(500, chains=2, return_inferencedata=True)
assert inference_data
assert inference_data.log_likelihood["w"].shape == (2, 500, 1)
def test_potential(self):
with pm.Model():
x = pm.Normal("x", 0.0, 1.0)
pm.Potential("z", pm.logp(pm.Normal.dist(x, 1.0), np.random.randn(10)))
inference_data = pm.sample(100, chains=2, return_inferencedata=True)
assert inference_data
@pytest.mark.parametrize("use_context", [True, False])
def test_constant_data(self, use_context):
"""Test constant_data group behaviour."""
with pm.Model() as model:
x = pm.ConstantData("x", [1.0, 2.0, 3.0])
y = pm.MutableData("y", [1.0, 2.0, 3.0])
beta = pm.Normal("beta", 0, 1)
obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable
trace = pm.sample(100, chains=2, tune=100, return_inferencedata=False)
if use_context:
inference_data = to_inference_data(trace=trace)
if not use_context:
inference_data = to_inference_data(trace=trace, model=model)
test_dict = {"posterior": ["beta"], "observed_data": ["obs"], "constant_data": ["x"]}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
assert inference_data.log_likelihood["obs"].shape == (2, 100, 3)
def test_predictions_constant_data(self):
with pm.Model():
x = pm.ConstantData("x", [1.0, 2.0, 3.0])
y = pm.MutableData("y", [1.0, 2.0, 3.0])
beta = pm.Normal("beta", 0, 1)
obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable
trace = pm.sample(100, tune=100, return_inferencedata=False)
inference_data = to_inference_data(trace)
test_dict = {"posterior": ["beta"], "observed_data": ["obs"], "constant_data": ["x"]}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
with pm.Model():
x = pm.MutableData("x", [1.0, 2.0])
y = pm.ConstantData("y", [1.0, 2.0])
beta = pm.Normal("beta", 0, 1)
obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable
predictive_trace = pm.sample_posterior_predictive(
inference_data, return_inferencedata=False
)
assert set(predictive_trace.keys()) == {"obs"}
# this should be four chains of 100 samples
# assert predictive_trace["obs"].shape == (400, 2)
# but the shape seems to vary between pymc versions
inference_data = predictions_to_inference_data(predictive_trace, posterior_trace=trace)
test_dict = {"posterior": ["beta"], "~observed_data": ""}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails, "Posterior data not copied over as expected."
test_dict = {"predictions": ["obs"]}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails, "Predictions not instantiated as expected."
test_dict = {"predictions_constant_data": ["x"]}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails, "Predictions constant data not instantiated as expected."
def test_no_trace(self):
with pm.Model() as model:
x = pm.ConstantData("x", [1.0, 2.0, 3.0])
y = pm.MutableData("y", [1.0, 2.0, 3.0])
beta = pm.Normal("beta", 0, 1)
obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable
idata = pm.sample(100, tune=100)
prior = pm.sample_prior_predictive(return_inferencedata=False)
posterior_predictive = pm.sample_posterior_predictive(idata, return_inferencedata=False)
# Only prior
inference_data = to_inference_data(prior=prior, model=model)
test_dict = {"prior": ["beta"], "prior_predictive": ["obs"]}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
# Only posterior_predictive
inference_data = to_inference_data(posterior_predictive=posterior_predictive, model=model)
test_dict = {"posterior_predictive": ["obs"]}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
# Prior and posterior_predictive but no trace
inference_data = to_inference_data(
prior=prior, posterior_predictive=posterior_predictive, model=model
)
test_dict = {
"prior": ["beta"],
"prior_predictive": ["obs"],
"posterior_predictive": ["obs"],
}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
@pytest.mark.parametrize("use_context", [True, False])
def test_priors_separation(self, use_context):
"""Test model is enough to get prior, prior predictive and observed_data."""
with pm.Model() as model:
x = pm.MutableData("x", [1.0, 2.0, 3.0])
y = pm.ConstantData("y", [1.0, 2.0, 3.0])
beta = pm.Normal("beta", 0, 1)
obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable
prior = pm.sample_prior_predictive(return_inferencedata=False)
test_dict = {
"prior": ["beta", "~obs"],
"observed_data": ["obs"],
"prior_predictive": ["obs"],
}
if use_context:
with model:
inference_data = to_inference_data(prior=prior)
else:
inference_data = to_inference_data(prior=prior, model=model)
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
def test_conversion_from_variables_subset(self):
"""This is a regression test for issue #5337."""
with pm.Model() as model:
x = pm.Normal("x")
pm.Normal("y", x, observed=5)
idata = pm.sample(
tune=10, draws=20, chains=1, step=pm.Metropolis(), compute_convergence_checks=False
)
pm.sample_posterior_predictive(idata, var_names=["x"])
pm.sample_prior_predictive(var_names=["x"])
def test_multivariate_observations(self):
coords = {"direction": ["x", "y", "z"], "experiment": np.arange(20)}
data = np.random.multinomial(20, [0.2, 0.3, 0.5], size=20)
with pm.Model(coords=coords):
p = pm.Beta("p", 1, 1, size=(3,))
p = p / p.sum()
pm.Multinomial("y", 20, p, dims=("experiment", "direction"), observed=data)
idata = pm.sample(draws=50, chains=2, tune=100, return_inferencedata=True)
test_dict = {
"posterior": ["p"],
"sample_stats": ["lp"],
"log_likelihood": ["y"],
"observed_data": ["y"],
}
fails = check_multiple_attrs(test_dict, idata)
assert not fails
assert "direction" not in idata.log_likelihood.dims
assert "direction" in idata.observed_data.dims
assert idata.log_likelihood["y"].shape == (2, 50, 20)
def test_constant_data_coords_issue_5046(self):
"""This is a regression test against a bug where a local coords variable was overwritten."""
dims = {"alpha": ["backwards"], "bravo": ["letters", "yesno"]}
coords = {
"backwards": np.arange(17)[::-1],
"letters": list("ABCDEFGHIJK"),
"yesno": ["yes", "no"],
}
data = {
name: np.random.uniform(size=[len(coords[dn]) for dn in dnames])
for name, dnames in dims.items()
}
for k in data:
assert len(data[k].shape) == len(dims[k])
ds = pm.backends.arviz.dict_to_dataset(
data=data, library=pm, coords=coords, dims=dims, default_dims=[], index_origin=0
)
for dname, cvals in coords.items():
np.testing.assert_array_equal(ds[dname].values, cvals)
def test_issue_5043_autoconvert_coord_values(self):
coords = {"city": pd.Series(["Bonn", "Berlin"])}
with pm.Model(coords=coords) as pmodel:
# The model tracks coord values as (immutable) tuples
assert isinstance(pmodel.coords["city"], tuple)
pm.Normal("x", dims="city")
mtrace = pm.sample(
return_inferencedata=False,
compute_convergence_checks=False,
step=pm.Metropolis(),
cores=1,
tune=7,
draws=15,
)
        # The converter must convert coord values to numpy arrays,
        # because tuples as coordinate values cause problems with xarray.
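        # For example (illustrative): the tuple ("Bonn", "Berlin") coming from the model coords
        # should come back from the converter as a numpy array of the same city names.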
converter = InferenceDataConverter(trace=mtrace)
assert isinstance(converter.coords["city"], np.ndarray)
converter.to_inference_data()
# We're not automatically converting things other than tuple,
# so advanced use cases remain supported at the InferenceData level.
# They just can't be used in the model construction already.
converter = InferenceDataConverter(
trace=mtrace,
coords={
"city": pd.MultiIndex.from_tuples(
[("Bonn", 53111), ("Berlin", 10178)], names=["name", "zipcode"]
)
},
)
assert isinstance(converter.coords["city"], pd.MultiIndex)
class TestPyMCWarmupHandling:
@pytest.mark.parametrize("save_warmup", [False, True])
@pytest.mark.parametrize("chains", [1, 2])
@pytest.mark.parametrize("tune,draws", [(0, 50), (10, 40), (30, 0)])
def test_save_warmup(self, save_warmup, chains, tune, draws):
with pm.Model():
pm.Uniform("u1")
pm.Normal("n1")
idata = pm.sample(
tune=tune,
draws=draws,
chains=chains,
cores=1,
step=pm.Metropolis(),
discard_tuned_samples=False,
return_inferencedata=True,
idata_kwargs={"save_warmup": save_warmup},
)
warmup_prefix = "" if save_warmup and (tune > 0) else "~"
post_prefix = "" if draws > 0 else "~"
test_dict = {
f"{post_prefix}posterior": ["u1", "n1"],
f"{post_prefix}sample_stats": ["~tune", "accept"],
f"{warmup_prefix}warmup_posterior": ["u1", "n1"],
f"{warmup_prefix}warmup_sample_stats": ["~tune"],
"~warmup_log_likelihood": [],
"~log_likelihood": [],
}
fails = check_multiple_attrs(test_dict, idata)
assert not fails
if hasattr(idata, "posterior"):
assert idata.posterior.dims["chain"] == chains
assert idata.posterior.dims["draw"] == draws
if hasattr(idata, "warmup_posterior"):
assert idata.warmup_posterior.dims["chain"] == chains
assert idata.warmup_posterior.dims["draw"] == tune
def test_save_warmup_issue_1208_after_3_9(self):
with pm.Model():
pm.Uniform("u1")
pm.Normal("n1")
trace = pm.sample(
tune=100,
draws=200,
chains=2,
cores=1,
step=pm.Metropolis(),
discard_tuned_samples=False,
return_inferencedata=False,
)
assert isinstance(trace, pm.backends.base.MultiTrace)
assert len(trace) == 300
# from original trace, warmup draws should be separated out
idata = to_inference_data(trace, save_warmup=True)
test_dict = {
"posterior": ["u1", "n1"],
"sample_stats": ["~tune", "accept"],
"warmup_posterior": ["u1", "n1"],
"warmup_sample_stats": ["~tune", "accept"],
}
fails = check_multiple_attrs(test_dict, idata)
assert not fails
assert idata.posterior.dims["chain"] == 2
assert idata.posterior.dims["draw"] == 200
# manually sliced trace triggers the same warning as <=3.8
with pytest.warns(UserWarning, match="Warmup samples"):
idata = to_inference_data(trace[-30:], save_warmup=True)
test_dict = {
"posterior": ["u1", "n1"],
"sample_stats": ["~tune", "accept"],
"~warmup_posterior": [],
"~warmup_sample_stats": [],
}
fails = check_multiple_attrs(test_dict, idata)
assert not fails
assert idata.posterior.dims["chain"] == 2
assert idata.posterior.dims["draw"] == 30
|
the-stack_0_20381 | """
Functions used to support drawing. No Pyglet/OpenGL here.
"""
import math
import pymunk
from PIL import Image
from pymunk import autogeometry
from typing import List, Tuple, cast
from arcade import Color
from arcade import RGBA
def get_points_for_thick_line(start_x: float, start_y: float,
end_x: float, end_y: float,
line_width: float):
"""
    Function used internally for Arcade. OpenGL draws triangles only, so a thick
line must be two triangles that make up a rectangle. This calculates those
points.
"""
vector_x = start_x - end_x
vector_y = start_y - end_y
perpendicular_x = vector_y
perpendicular_y = -vector_x
length = math.sqrt(vector_x * vector_x + vector_y * vector_y)
if length == 0:
normal_x = 1.0
normal_y = 1.0
else:
normal_x = perpendicular_x / length
normal_y = perpendicular_y / length
r1_x = start_x + normal_x * line_width / 2
r1_y = start_y + normal_y * line_width / 2
r2_x = start_x - normal_x * line_width / 2
r2_y = start_y - normal_y * line_width / 2
r3_x = end_x + normal_x * line_width / 2
r3_y = end_y + normal_y * line_width / 2
r4_x = end_x - normal_x * line_width / 2
r4_y = end_y - normal_y * line_width / 2
points = (r1_x, r1_y), (r2_x, r2_y), (r4_x, r4_y), (r3_x, r3_y)
return points
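# Illustrative sketch (hypothetical helper, not used by Arcade itself): a quick check of the
# rectangle produced for a horizontal segment.
def _demo_get_points_for_thick_line():
    # A segment from (0, 0) to (10, 0) with line_width=2 expands by +/- 1 along the
    # perpendicular (vertical) axis, giving the four corners of a 10 x 2 rectangle.
    points = get_points_for_thick_line(0, 0, 10, 0, 2)
    # Expected: ((0.0, 1.0), (0.0, -1.0), (10.0, -1.0), (10.0, 1.0))
    return points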
def get_four_byte_color(color: Color) -> RGBA:
"""
Given a RGB list, it will return RGBA.
Given a RGBA list, it will return the same RGBA.
:param Color color: Three or four byte tuple
:returns: return: Four byte RGBA tuple
"""
if len(color) == 4:
return cast(RGBA, color)
elif len(color) == 3:
return color[0], color[1], color[2], 255
else:
raise ValueError("This isn't a 3 or 4 byte color")
def get_four_float_color(color: Color) -> Tuple[float, float, float, float]:
"""
Given a 3 or 4 RGB/RGBA color where each color goes 0-255, this
returns a RGBA tuple where each item is a scaled float from 0 to 1.
:param Color color: Three or four byte tuple
:return: Four floats as a RGBA tuple
"""
if len(color) == 4:
return color[0] / 255, color[1] / 255, color[2] / 255, color[3] / 255 # type: ignore
elif len(color) == 3:
return color[0] / 255, color[1] / 255, color[2] / 255, 1.0
else:
raise ValueError("This isn't a 3 or 4 byte color")
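# Illustrative sketch (hypothetical helper, not used elsewhere): how the two color
# converters relate for a plain RGB input.
def _demo_color_conversions():
    rgb = (255, 128, 0)
    four_byte = get_four_byte_color(rgb)    # (255, 128, 0, 255) -- alpha defaults to 255
    four_float = get_four_float_color(rgb)  # (1.0, ~0.502, 0.0, 1.0) -- channels scaled to 0..1
    return four_byte, four_float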
def make_transparent_color(color: Color, transparency: float):
"""
    Given an RGB color, along with an alpha, returns an RGBA color tuple.
:param Color color: Three or four byte RGBA color
:param float transparency: Transparency
"""
return color[0], color[1], color[2], transparency
def rotate_point(x: float, y: float, cx: float, cy: float,
angle_degrees: float) -> List[float]:
"""
Rotate a point around a center.
:param x: x value of the point you want to rotate
:param y: y value of the point you want to rotate
:param cx: x value of the center point you want to rotate around
:param cy: y value of the center point you want to rotate around
:param angle_degrees: Angle, in degrees, to rotate
:return: Return rotated (x, y) pair
:rtype: (float, float)
"""
temp_x = x - cx
temp_y = y - cy
# now apply rotation
angle_radians = math.radians(angle_degrees)
cos_angle = math.cos(angle_radians)
sin_angle = math.sin(angle_radians)
rotated_x = temp_x * cos_angle - temp_y * sin_angle
rotated_y = temp_x * sin_angle + temp_y * cos_angle
# translate back
rounding_precision = 2
x = round(rotated_x + cx, rounding_precision)
y = round(rotated_y + cy, rounding_precision)
return [x, y]
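# Illustrative sketch (hypothetical helper): rotating a point 90 degrees counter-clockwise
# around the origin. Results are rounded to two decimals by rotate_point itself.
def _demo_rotate_point():
    assert rotate_point(1, 0, 0, 0, 90) == [0.0, 1.0]
    # Rotating around a non-origin center translates, rotates, then translates back.
    assert rotate_point(2, 1, 1, 1, 180) == [0.0, 1.0]
    return True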
def calculate_hit_box_points_simple(image):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image:
:Returns: List of points
"""
left_border = 0
good = True
while good and left_border < image.width:
for row in range(image.height):
pos = (left_border, row)
pixel = image.getpixel(pos)
if type(pixel) is int or len(pixel) != 4:
raise TypeError("Error, calculate_points called on image not in RGBA format")
else:
if pixel[3] != 0:
good = False
break
if good:
left_border += 1
right_border = image.width - 1
good = True
while good and right_border > 0:
for row in range(image.height):
pos = (right_border, row)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
right_border -= 1
top_border = 0
good = True
while good and top_border < image.height:
for column in range(image.width):
pos = (column, top_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
top_border += 1
bottom_border = image.height - 1
good = True
while good and bottom_border > 0:
for column in range(image.width):
pos = (column, bottom_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
bottom_border -= 1
# If the image is empty, return an empty set
if bottom_border == 0:
return []
def _check_corner_offset(start_x, start_y, x_direction, y_direction):
bad = False
offset = 0
while not bad:
y = start_y + (offset * y_direction)
x = start_x
for count in range(offset + 1):
my_pixel = image.getpixel((x, y))
# print(f"({x}, {y}) = {pixel} | ", end="")
if my_pixel[3] != 0:
bad = True
break
y -= y_direction
x += x_direction
# print(f" - {bad}")
if not bad:
offset += 1
# print(f"offset: {offset}")
return offset
def _r(point, height, width):
return point[0] - width / 2, (height - point[1]) - height / 2
top_left_corner_offset = _check_corner_offset(left_border, top_border, 1, 1)
top_right_corner_offset = _check_corner_offset(right_border, top_border, -1, 1)
bottom_left_corner_offset = _check_corner_offset(left_border, bottom_border, 1, -1)
bottom_right_corner_offset = _check_corner_offset(right_border, bottom_border, -1, -1)
p1 = left_border + top_left_corner_offset, top_border
p2 = (right_border + 1) - top_right_corner_offset, top_border
p3 = (right_border + 1), top_border + top_right_corner_offset
p4 = (right_border + 1), (bottom_border + 1) - bottom_right_corner_offset
p5 = (right_border + 1) - bottom_right_corner_offset, (bottom_border + 1)
p6 = left_border + bottom_left_corner_offset, (bottom_border + 1)
p7 = left_border, (bottom_border + 1) - bottom_left_corner_offset
p8 = left_border, top_border + top_left_corner_offset
result = []
h = image.height
w = image.width
result.append(_r(p7, h, w))
if bottom_left_corner_offset:
result.append(_r(p6, h, w))
result.append(_r(p5, h, w))
if bottom_right_corner_offset:
result.append(_r(p4, h, w))
result.append(_r(p3, h, w))
if top_right_corner_offset:
result.append(_r(p2, h, w))
result.append(_r(p1, h, w))
if top_left_corner_offset:
result.append(_r(p8, h, w))
# Remove duplicates
result = tuple(dict.fromkeys(result))
return result
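# Illustrative sketch (hypothetical helper): the simple hit-box routine expects an RGBA
# Pillow image; a fully opaque square reduces to its four corners, centered on (0, 0).
def _demo_simple_hit_box():
    img = Image.new("RGBA", (4, 4), (255, 0, 0, 255))
    points = calculate_hit_box_points_simple(img)
    # Expected corners for a 4x4 fully opaque image: (-2, -2), (2, -2), (2, 2), (-2, 2)
    return points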
def calculate_hit_box_points_detailed(image: Image, hit_box_detail: float = 4.5):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image: Image get hit box from.
    :param float hit_box_detail: How detailed to make the hit box. There's a
trade-off in number of points vs. accuracy.
:Returns: List of points
"""
def sample_func(sample_point):
""" Method used to sample image. """
if sample_point[0] < 0 \
or sample_point[1] < 0 \
or sample_point[0] >= image.width \
or sample_point[1] >= image.height:
return 0
point_tuple = sample_point[0], sample_point[1]
color = image.getpixel(point_tuple)
if color[3] > 0:
return 255
else:
return 0
# Do a quick check if it is a full tile
p1 = 0, 0
p2 = 0, image.height - 1
p3 = image.width - 1, image.height - 1
p4 = image.width - 1, 0
if sample_func(p1) and sample_func(p2) and sample_func(p3) and sample_func(p4):
# Do a quick check if it is a full tile
p1 = (-image.width / 2, -image.height / 2)
p2 = (image.width / 2, -image.height / 2)
p3 = (image.width / 2, image.height / 2)
p4 = (-image.width / 2, image.height / 2)
return p1, p2, p3, p4
# Get the bounding box
logo_bb = pymunk.BB(-1, -1, image.width, image.height)
# Set of lines that trace the image
line_set = pymunk.autogeometry.PolylineSet()
# How often to sample?
downres = 1
horizontal_samples = int(image.width / downres)
vertical_samples = int(image.height / downres)
# Run the trace
# Get back one or more sets of lines covering stuff.
line_sets = pymunk.autogeometry.march_soft(
logo_bb,
horizontal_samples, vertical_samples,
99,
sample_func)
if len(line_sets) == 0:
return []
selected_line_set = line_sets[0]
selected_range = None
    if len(line_sets) > 1:
# We have more than one line set. Try and find one that covers most of
# the sprite.
        for line in line_sets:
min_x = None
min_y = None
max_x = None
max_y = None
for point in line:
if min_x is None or point.x < min_x:
min_x = point.x
if max_x is None or point.x > max_x:
max_x = point.x
if min_y is None or point.y < min_y:
min_y = point.y
if max_y is None or point.y > max_y:
max_y = point.y
if min_x is None or max_x is None or min_y is None or max_y is None:
raise ValueError("No points in bounding box.")
my_range = max_x - min_x + max_y + min_y
if selected_range is None or my_range > selected_range:
selected_range = my_range
selected_line_set = line
# Reduce number of vertices
# original_points = len(selected_line_set)
selected_line_set = pymunk.autogeometry.simplify_curves(selected_line_set,
hit_box_detail)
# downsampled_points = len(selected_line_set)
# Convert to normal points, offset fo 0,0 is center, flip the y
hh = image.height / 2
hw = image.width / 2
points = []
for vec2 in selected_line_set:
point = round(vec2.x - hw), round(image.height - (vec2.y - hh) - image.height)
points.append(point)
if len(points) > 1 and points[0] == points[-1]:
points.pop()
# print(f"{sprite.texture.name} Line-sets={len(line_set)}, Original points={original_points}, Downsampled points={downsampled_points}")
return points
|
the-stack_0_20382 | _base_ = [
'../../_base_/datasets/td.py',
'../../_base_/schedules/schedule_1x.py',
'../../../_base_/default_runtime.py'
]
data_root = 'data/td/'
img_rescale_ratio = 0.25
img_scale = (3920 * img_rescale_ratio, 2160 * img_rescale_ratio)
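# With img_rescale_ratio = 0.25 the scale above evaluates to (980.0, 540.0),
# i.e. the original 3920x2160 frames are downscaled to a quarter of their size.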
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
classes = ('car', 'other_vehicle')
# model settings
model = dict(
type='FCOSOBB',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
extra_convs_on_inputs=False, # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='OBBFCOSHead',
num_classes=2,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
scale_theta=True,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='PolyIoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='obb_nms', iou_thr=0.1),
max_per_img=200)
# optimizer
optimizer = dict(
lr=0.0025, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadOBBAnnotations', with_bbox=True,
with_label=True, with_poly_as_mask=True),
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(type='OBBRandomFlip', h_flip_ratio=0.5, v_flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
    dict(type='RandomOBBRotate', rotate_after_flip=True, keep_shape=False,  # not keeping the shape leaves more black border
angles=(-180, 180), vert_rate=0.5),
dict(type='Pad', size_divisor=32),
dict(type='Mask2OBB', obb_type='obb'),
dict(type='OBBDefaultFormatBundle'),
dict(type='OBBCollect', keys=['img', 'gt_bboxes', 'gt_obboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipRotateAug',
img_scale=[img_scale],
h_flip=False,
v_flip=False,
rotate=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='OBBRandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomOBBRotate', rotate_after_flip=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='OBBCollect', keys=['img']),
])
]
# does evaluation while training
# uncomment it when you need to evaluate every epoch
data = dict(
samples_per_gpu=2,
workers_per_gpu=4,
train=dict(
ann_file=data_root + 'split_set_train/annfiles2/*.pkl',
img_prefix=data_root + 'split_set_train/images/',
pipeline=train_pipeline,
classes=classes),
val=dict(
ann_file=data_root + 'split_set_test/annfiles2/*.pkl',
img_prefix=data_root + 'split_set_test/images/',
pipeline=test_pipeline,
classes=classes),
test=dict(
ann_file=data_root + 'split_set_test/annfiles2/*.pkl',
img_prefix=data_root + 'split_set_test/images/',
pipeline=test_pipeline,
classes=classes))
# learning policy
lr_config = dict(
policy='step',
warmup='constant',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[16, 22])
total_epochs = 24
work_dir = 'data/td/work_dirs/fcos_obb_r50_fpn_gn-head_4x4_1x_td_mixmorepatch_rotate' |
the-stack_0_20383 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Integration tests for pySparkling for Spark running in YARN mode
"""
import generic_test_utils
from integ_test_utils import *
import unittest
class YarnIntegTestSuite(unittest.TestCase):
def test_chicago_crime(self):
env = IntegTestEnv()
env.set_spark_master("yarn-client")
# Configure YARN environment
env.conf("spark.yarn.max.executor.failures", 1) # In fail of executor, fail the test
env.conf("spark.executor.instances", 3)
env.conf("spark.executor.memory", "2g")
env.conf("spark.ext.h2o.port.base", 63331)
env.conf("spark.driver.memory", "2g")
return_code = launch(env, "examples/scripts/ChicagoCrimeDemo.py")
self.assertTrue(return_code == 0, "Process ended in a wrong way. It ended with return code "+str(return_code))
if __name__ == '__main__':
generic_test_utils.run_tests([YarnIntegTestSuite], file_name="py_integ_yarn_tests_report")
|
the-stack_0_20384 | # Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import nova.scheduler.utils
import nova.servicegroup
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
class TestListServersIpFilter(test.TestCase):
def setUp(self):
super(TestListServersIpFilter, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.neutron = self.useFixture(
nova_fixtures.NeutronFixture(self))
# Add a 2nd port to the neutron fixture to have multiple ports
self.neutron.create_port(self.neutron.port_2)
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.useFixture(nova_fixtures.PlacementFixture())
self.start_service('conductor')
self.flags(enabled_filters=['ComputeFilter'],
group='filter_scheduler')
self.start_service('scheduler')
self.start_service('compute')
self.start_service('consoleauth')
self.useFixture(cast_as_call.CastAsCall(self))
self.useFixture(nova_fixtures.PlacementFixture())
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
def wait_until_active_or_timeout(self, server_id):
timeout = 0.0
server = self.api.get_server(server_id)
while server['status'] != "ACTIVE" and timeout < 10.0:
time.sleep(.1)
timeout += .1
server = self.api.get_server(server_id)
if server['status'] != "ACTIVE":
self.fail(
'Timed out waiting for server %s to be ACTIVE.' % server_id)
return server
def test_list_servers_with_ip_filters_regex(self):
"""Tests listing servers with IP filter regex.
The compute API will perform a regex match on the ip filter and include
all servers that have fixed IPs which match the filter.
For example, consider we have two servers. The first server has IP
10.1.1.1 and the second server has IP 10.1.1.10. If we list servers
with filter ip=10.1.1.1 we should get back both servers because
10.1.1.1 is a prefix of 10.1.1.10. If we list servers with filter
ip=10.1.1.10 then we should only get back the second server.
"""
# We're going to create two servers with unique ports, but the IPs on
# the ports are close enough that one matches the regex for the other.
# The ports used in this test are defined in the NeutronFixture.
for port_id in (self.neutron.port_1['id'], self.neutron.port_2['id']):
server = dict(
name=port_id, imageRef=self.image_id, flavorRef=self.flavor_id,
networks=[{'port': port_id}])
server = self.api.post_server({'server': server})
self.addCleanup(self.api.delete_server, server['id'])
self.wait_until_active_or_timeout(server['id'])
# Now list servers and filter on the IP of the first server.
servers = self.api.get_servers(
search_opts={
'ip': self.neutron.port_1['fixed_ips'][0]['ip_address']})
# We should get both servers back because the IP on the first server is
# a prefix of the IP on the second server.
self.assertEqual(2, len(servers),
'Unexpected number of servers returned when '
'filtering by ip=%s: %s' % (
self.neutron.port_1['fixed_ips'][0]['ip_address'],
servers))
# Now list servers and filter on the IP of the second server.
servers = self.api.get_servers(
search_opts={
'ip': self.neutron.port_2['fixed_ips'][0]['ip_address']})
# We should get one server back because the IP on the second server is
# unique between both servers.
self.assertEqual(1, len(servers),
'Unexpected number of servers returned when '
'filtering by ip=%s: %s' % (
self.neutron.port_2['fixed_ips'][0]['ip_address'],
servers))
self.assertEqual(self.neutron.port_2['fixed_ips'][0]['ip_address'],
servers[0]['addresses']['private-network'][0]['addr'])
|
the-stack_0_20385 | import asyncio
import sys
import time
from datetime import datetime
from decimal import Decimal
from typing import Callable, List, Optional, Tuple, Dict
import aiohttp
from cryptodoge.cmds.units import units
from cryptodoge.rpc.wallet_rpc_client import WalletRpcClient
from cryptodoge.server.start_wallet import SERVICE_NAME
from cryptodoge.util.bech32m import encode_puzzle_hash
from cryptodoge.util.byte_types import hexstr_to_bytes
from cryptodoge.util.config import load_config
from cryptodoge.util.default_root import DEFAULT_ROOT_PATH
from cryptodoge.util.ints import uint16, uint64
from cryptodoge.wallet.transaction_record import TransactionRecord
from cryptodoge.wallet.util.wallet_types import WalletType
def print_transaction(tx: TransactionRecord, verbose: bool, name) -> None:
if verbose:
print(tx)
else:
cryptodoge_amount = Decimal(int(tx.amount)) / units["cryptodoge"]
to_address = encode_puzzle_hash(tx.to_puzzle_hash, name)
print(f"Transaction {tx.name}")
print(f"Status: {'Confirmed' if tx.confirmed else ('In mempool' if tx.is_in_mempool() else 'Pending')}")
print(f"Amount {'sent' if tx.sent else 'received'}: {cryptodoge_amount} {name}")
print(f"To address: {to_address}")
print("Created at:", datetime.fromtimestamp(tx.created_at_time).strftime("%Y-%m-%d %H:%M:%S"))
print("")
async def get_transaction(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
transaction_id = hexstr_to_bytes(args["tx_id"])
config = load_config(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
tx: TransactionRecord = await wallet_client.get_transaction(wallet_id, transaction_id=transaction_id)
print_transaction(tx, verbose=(args["verbose"] > 0), name=name)
async def get_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
paginate = args["paginate"]
if paginate is None:
paginate = sys.stdout.isatty()
txs: List[TransactionRecord] = await wallet_client.get_transactions(wallet_id)
config = load_config(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
if len(txs) == 0:
print("There are no transactions to this address")
offset = args["offset"]
num_per_screen = 5 if paginate else len(txs)
for i in range(offset, len(txs), num_per_screen):
for j in range(0, num_per_screen):
if i + j >= len(txs):
break
print_transaction(txs[i + j], verbose=(args["verbose"] > 0), name=name)
if i + num_per_screen >= len(txs):
return None
print("Press q to quit, or c to continue")
while True:
entered_key = sys.stdin.read(1)
if entered_key == "q":
return None
elif entered_key == "c":
break
def check_unusual_transaction(amount: Decimal, fee: Decimal):
return fee >= amount
async def send(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
amount = Decimal(args["amount"])
fee = Decimal(args["fee"])
address = args["address"]
override = args["override"]
if not override and check_unusual_transaction(amount, fee):
print(
f"A transaction of amount {amount} and fee {fee} is unusual.\n"
f"Pass in --override if you are sure you mean to do this."
)
return
print("Submitting transaction...")
final_amount = uint64(int(amount * units["cryptodoge"]))
final_fee = uint64(int(fee * units["cryptodoge"]))
res = await wallet_client.send_transaction(wallet_id, final_amount, address, final_fee)
tx_id = res.name
start = time.time()
while time.time() - start < 10:
await asyncio.sleep(0.1)
tx = await wallet_client.get_transaction(wallet_id, tx_id)
if len(tx.sent_to) > 0:
print(f"Transaction submitted to nodes: {tx.sent_to}")
print(f"Do cryptodoge wallet get_transaction -f {fingerprint} -tx 0x{tx_id} to get status")
return None
print("Transaction not yet submitted to nodes")
print(f"Do 'cryptodoge wallet get_transaction -f {fingerprint} -tx 0x{tx_id}' to get status")
async def get_address(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
res = await wallet_client.get_next_address(wallet_id, False)
print(res)
async def delete_unconfirmed_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
await wallet_client.delete_unconfirmed_transactions(wallet_id)
print(f"Successfully deleted all unconfirmed transactions for wallet id {wallet_id} on key {fingerprint}")
def wallet_coin_unit(typ: WalletType, address_prefix: str) -> Tuple[str, int]:
if typ == WalletType.COLOURED_COIN:
return "", units["colouredcoin"]
if typ in [WalletType.STANDARD_WALLET, WalletType.POOLING_WALLET, WalletType.MULTI_SIG, WalletType.RATE_LIMITED]:
return address_prefix, units["cryptodoge"]
return "", units["mojo"]
def print_balance(amount: int, scale: int, address_prefix: str) -> str:
ret = f"{amount/scale} {address_prefix} "
if scale > 1:
ret += f"({amount} mojo)"
return ret
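# Illustration with made-up numbers: print_balance(1500, 1000, "xcc") returns
# "1.5 xcc (1500 mojo)"; with scale == 1 the "(... mojo)" suffix is omitted.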
async def print_balances(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
summaries_response = await wallet_client.get_wallets()
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
print(f"Wallet height: {await wallet_client.get_height_info()}")
print(f"Sync status: {'Synced' if (await wallet_client.get_synced()) else 'Not synced'}")
print(f"Balances, fingerprint: {fingerprint}")
for summary in summaries_response:
wallet_id = summary["id"]
balances = await wallet_client.get_wallet_balance(wallet_id)
typ = WalletType(int(summary["type"]))
address_prefix, scale = wallet_coin_unit(typ, address_prefix)
print(f"Wallet ID {wallet_id} type {typ.name} {summary['name']}")
print(f" -Total Balance: {print_balance(balances['confirmed_wallet_balance'], scale, address_prefix)}")
print(
f" -Pending Total Balance: {print_balance(balances['unconfirmed_wallet_balance'], scale, address_prefix)}"
)
print(f" -Spendable: {print_balance(balances['spendable_balance'], scale, address_prefix)}")
async def get_wallet(wallet_client: WalletRpcClient, fingerprint: Optional[int] = None) -> Optional[Tuple[WalletRpcClient, int]]:
if fingerprint is not None:
fingerprints = [fingerprint]
else:
fingerprints = await wallet_client.get_public_keys()
if len(fingerprints) == 0:
print("No keys loaded. Run 'cryptodoge keys generate' or import a key")
return None
if len(fingerprints) == 1:
fingerprint = fingerprints[0]
if fingerprint is not None:
log_in_response = await wallet_client.log_in(fingerprint)
else:
print("Choose wallet key:")
for i, fp in enumerate(fingerprints):
print(f"{i+1}) {fp}")
val = None
while val is None:
val = input("Enter a number to pick or q to quit: ")
if val == "q":
return None
if not val.isdigit():
val = None
else:
index = int(val) - 1
if index >= len(fingerprints):
print("Invalid value")
val = None
continue
else:
fingerprint = fingerprints[index]
assert fingerprint is not None
log_in_response = await wallet_client.log_in(fingerprint)
if log_in_response["success"] is False:
if log_in_response["error"] == "not_initialized":
use_cloud = True
if "backup_path" in log_in_response:
path = log_in_response["backup_path"]
print(f"Backup file from backup.cryptodoge.cc downloaded and written to: {path}")
val = input("Do you want to use this file to restore from backup? (Y/N) ")
if val.lower() == "y":
log_in_response = await wallet_client.log_in_and_restore(fingerprint, path)
else:
use_cloud = False
if "backup_path" not in log_in_response or use_cloud is False:
if use_cloud is True:
val = input(
"No online backup file found,\n Press S to skip restore from backup"
"\n Press F to use your own backup file: "
)
else:
val = input(
"Cloud backup declined,\n Press S to skip restore from backup"
"\n Press F to use your own backup file: "
)
if val.lower() == "s":
log_in_response = await wallet_client.log_in_and_skip(fingerprint)
elif val.lower() == "f":
val = input("Please provide the full path to your backup file: ")
log_in_response = await wallet_client.log_in_and_restore(fingerprint, val)
if "success" not in log_in_response or log_in_response["success"] is False:
if "error" in log_in_response:
error = log_in_response["error"]
print(f"Error: {log_in_response[error]}")
return None
return wallet_client, fingerprint
async def execute_with_wallet(
wallet_rpc_port: Optional[int], fingerprint: int, extra_params: Dict, function: Callable
) -> None:
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if wallet_rpc_port is None:
wallet_rpc_port = config["wallet"]["rpc_port"]
wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
wallet_client_f = await get_wallet(wallet_client, fingerprint=fingerprint)
if wallet_client_f is None:
wallet_client.close()
await wallet_client.await_closed()
return None
wallet_client, fingerprint = wallet_client_f
await function(extra_params, wallet_client, fingerprint)
except KeyboardInterrupt:
pass
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(
f"Connection error. Check if the wallet is running at {wallet_rpc_port}. "
"You can run the wallet via:\n\tcryptodoge start wallet"
)
else:
print(f"Exception from 'wallet' {e}")
wallet_client.close()
await wallet_client.await_closed()
|
the-stack_0_20386 | """Class for the Query-Adaptive Convolution (QAConv) loss
QAConv is an effective image matching method proposed in
Shengcai Liao and Ling Shao, "Interpretable and Generalizable Person Re-Identification with Query-Adaptive
Convolution and Temporal Lifting." In The European Conference on Computer Vision (ECCV), 23-28 August, 2020.
Author:
Shengcai Liao
[email protected]
Version:
V1.1
July 13, 2020
"""
import torch
from torch import nn
from torch.nn import Module
from torch.nn import functional as F
class QAConvLoss(Module):
def __init__(self, num_classes, num_features, height, width, mem_batch_size=16):
"""
Inputs:
num_classes: the number of classes in the training set.
num_features: the number of feature channels in the final feature map.
height: height of the final feature map
width: width of the final feature map
mem_batch_size: batch size of the class memory for query-adaptive convolution. For
mem_batch_size >= num_classes, that is, doing convolution at once with all the class memory, the
                computation would be faster, at the cost of possibly large GPU memory.
"""
super(QAConvLoss, self).__init__()
self.num_classes = num_classes
self.num_features = num_features
self.height = height
self.width = width
self.mem_batch_size = mem_batch_size
self.register_buffer('class_memory', torch.zeros(num_classes, num_features, height, width))
self.bn = nn.BatchNorm1d(1)
self.fc = nn.Linear(self.height * self.width * 2, 1)
self.logit_bn = nn.BatchNorm1d(1)
self.reset_parameters()
def reset_running_stats(self):
self.class_memory.zero_()
def reset_parameters(self):
self.reset_running_stats()
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'.format(input.dim()))
def forward(self, feature, target):
self._check_input_dim(feature)
kernel = feature.permute([0, 2, 3, 1]) # [b, h, w, d]
kernel = kernel.reshape(-1, self.num_features, 1, 1) # [bhw, d, 1, 1]
hw = self.height * self.width
batch_size = target.size(0)
if self.mem_batch_size < self.num_classes:
score = torch.zeros(self.num_classes, batch_size, 2 * hw, device=feature.device)
for i in range(0, self.num_classes, self.mem_batch_size):
j = min(i + self.mem_batch_size, self.num_classes)
s = F.conv2d(self.class_memory[i: j, :, :, :].detach().clone(), kernel) # [m, bhw, h, w]
s = s.view(-1, batch_size, hw, hw)
score[i: j, :, :] = torch.cat((s.max(dim=2)[0], s.max(dim=3)[0]), dim=-1) # [m, b, 2 * hw]
else:
score = F.conv2d(self.class_memory.detach().clone(), kernel) # [c, bhw, h, w]
score = score.view(self.num_classes, batch_size, hw, hw)
score = torch.cat((score.max(dim=2)[0], score.max(dim=3)[0]), dim=-1)
score = score.view(self.num_classes, 1, batch_size * 2 * hw)
score = self.bn(score).view(self.num_classes * batch_size, 2 * hw)
score = self.fc(score).view(self.num_classes, batch_size).t()
score = self.logit_bn(score.unsqueeze(1)).squeeze()
target1 = target.unsqueeze(1)
onehot_labels = torch.zeros_like(score).scatter(1, target1, 1)
loss = F.binary_cross_entropy_with_logits(score, onehot_labels, reduction='none')
prob = score.sigmoid()
        weight = torch.pow(torch.where(onehot_labels.bool(), 1. - prob, prob), 2.)
loss = loss * weight
loss = loss.sum(-1)
with torch.no_grad():
_, preds = torch.max(score, 1)
acc = (preds == target).float()
self.class_memory[target] = feature
return loss, acc
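if __name__ == "__main__":
    # Minimal smoke-test sketch. The batch size, feature-map shape and class
    # count below are illustrative assumptions only, not values taken from the
    # paper or from any training configuration.
    torch.manual_seed(0)
    criterion = QAConvLoss(num_classes=8, num_features=32, height=6, width=3,
                           mem_batch_size=4)
    feats = torch.randn(4, 32, 6, 3)    # [batch, channels, height, width]
    labels = torch.randint(0, 8, (4,))  # one class index per sample
    loss, acc = criterion(feats, labels)
    print(loss.shape, acc.shape)        # per-sample loss and accuracy, both [4]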
|
the-stack_0_20387 | # Module: Regression
# Author: Moez Ali <[email protected]>
# License: MIT
# Release: PyCaret 2.2.0
# Last modified : 25/10/2020
import pandas as pd
import numpy as np
import pycaret.internal.tabular
from pycaret.parallel import ParallelBackend
from pycaret.internal.Display import Display, is_in_colab, enable_colab
from typing import List, Tuple, Any, Union, Optional, Dict, Callable
import warnings
from IPython.utils import io
from pycaret.internal.tabular import MLUsecase
warnings.filterwarnings("ignore")
def setup(
data: Union[pd.DataFrame, Callable[[], pd.DataFrame]],
target: str,
train_size: float = 0.7,
test_data: Optional[pd.DataFrame] = None,
preprocess: bool = True,
imputation_type: str = "simple",
iterative_imputation_iters: int = 5,
categorical_features: Optional[List[str]] = None,
categorical_imputation: str = "constant",
categorical_iterative_imputer: Union[str, Any] = "lightgbm",
ordinal_features: Optional[Dict[str, list]] = None,
high_cardinality_features: Optional[List[str]] = None,
high_cardinality_method: str = "frequency",
numeric_features: Optional[List[str]] = None,
numeric_imputation: str = "mean",
numeric_iterative_imputer: Union[str, Any] = "lightgbm",
date_features: Optional[List[str]] = None,
ignore_features: Optional[List[str]] = None,
normalize: bool = False,
normalize_method: str = "zscore",
transformation: bool = False,
transformation_method: str = "yeo-johnson",
handle_unknown_categorical: bool = True,
unknown_categorical_method: str = "least_frequent",
pca: bool = False,
pca_method: str = "linear",
pca_components: Optional[float] = None,
ignore_low_variance: bool = False,
combine_rare_levels: bool = False,
rare_level_threshold: float = 0.10,
bin_numeric_features: Optional[List[str]] = None,
remove_outliers: bool = False,
outliers_threshold: float = 0.05,
remove_multicollinearity: bool = False,
multicollinearity_threshold: float = 0.9,
remove_perfect_collinearity: bool = True,
create_clusters: bool = False,
cluster_iter: int = 20,
polynomial_features: bool = False,
polynomial_degree: int = 2,
trigonometry_features: bool = False,
polynomial_threshold: float = 0.1,
group_features: Optional[List[str]] = None,
group_names: Optional[List[str]] = None,
feature_selection: bool = False,
feature_selection_threshold: float = 0.8,
feature_selection_method: str = "classic",
feature_interaction: bool = False,
feature_ratio: bool = False,
interaction_threshold: float = 0.01,
transform_target: bool = False,
transform_target_method: str = "box-cox",
data_split_shuffle: bool = True,
data_split_stratify: Union[bool, List[str]] = False,
fold_strategy: Union[str, Any] = "kfold",
fold: int = 10,
fold_shuffle: bool = False,
fold_groups: Optional[Union[str, pd.DataFrame]] = None,
n_jobs: Optional[int] = -1,
use_gpu: bool = False,
custom_pipeline: Union[
Any, Tuple[str, Any], List[Any], List[Tuple[str, Any]]
] = None,
html: bool = True,
session_id: Optional[int] = None,
log_experiment: bool = False,
experiment_name: Optional[str] = None,
experiment_custom_tags: Optional[Dict[str, Any]] = None,
log_plots: Union[bool, list] = False,
log_profile: bool = False,
log_data: bool = False,
silent: bool = False,
verbose: bool = True,
profile: bool = False,
profile_kwargs: Dict[str, Any] = None,
):
"""
This function initializes the training environment and creates the transformation
pipeline. Setup function must be called before executing any other function. It takes
two mandatory parameters: ``data`` and ``target``. All the other parameters are
optional.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
data : Union[pd.DataFrame, Callable[[], pd.DataFrame]]
Shape (n_samples, n_features), where n_samples is the number of samples and
n_features is the number of features. If ``data`` is a function, then it should
generate the pandas dataframe. If you want to use distributed PyCaret, it is
recommended to provide a function to avoid broadcasting large datasets from
the driver to workers.
target: str
Name of the target column to be passed in as a string. The target variable can
be either binary or multiclass.
train_size: float, default = 0.7
Proportion of the dataset to be used for training and validation. Should be
between 0.0 and 1.0.
test_data: pandas.DataFrame, default = None
If not None, test_data is used as a hold-out set and ``train_size`` parameter is
ignored. test_data must be labelled and the shape of data and test_data must
match.
preprocess: bool, default = True
When set to False, no transformations are applied except for train_test_split
and custom transformations passed in ``custom_pipeline`` param. Data must be
ready for modeling (no missing values, no dates, categorical data encoding),
when preprocess is set to False.
imputation_type: str, default = 'simple'
The type of imputation to use. Can be either 'simple' or 'iterative'.
iterative_imputation_iters: int, default = 5
Number of iterations. Ignored when ``imputation_type`` is not 'iterative'.
categorical_features: list of str, default = None
If the inferred data types are not correct or the silent param is set to True,
categorical_features param can be used to overwrite or define the data types.
It takes a list of strings with column names that are categorical.
categorical_imputation: str, default = 'constant'
Missing values in categorical features are imputed with a constant 'not_available'
value. The other available option is 'mode'.
categorical_iterative_imputer: str, default = 'lightgbm'
Estimator for iterative imputation of missing values in categorical features.
Ignored when ``imputation_type`` is not 'iterative'.
ordinal_features: dict, default = None
Encode categorical features as ordinal. For example, a categorical feature with
'low', 'medium', 'high' values where low < medium < high can be passed as
ordinal_features = { 'column_name' : ['low', 'medium', 'high'] }.
high_cardinality_features: list of str, default = None
When categorical features contains many levels, it can be compressed into fewer
levels using this parameter. It takes a list of strings with column names that
are categorical.
high_cardinality_method: str, default = 'frequency'
Categorical features with high cardinality are replaced with the frequency of
values in each level occurring in the training dataset. Other available method
is 'clustering' which trains the K-Means clustering algorithm on the statistical
attribute of the training data and replaces the original value of feature with the
cluster label. The number of clusters is determined by optimizing Calinski-Harabasz
and Silhouette criterion.
numeric_features: list of str, default = None
If the inferred data types are not correct or the silent param is set to True,
numeric_features param can be used to overwrite or define the data types.
It takes a list of strings with column names that are numeric.
numeric_imputation: str, default = 'mean'
Missing values in numeric features are imputed with 'mean' value of the feature
in the training dataset. The other available option is 'median' or 'zero'.
numeric_iterative_imputer: str, default = 'lightgbm'
Estimator for iterative imputation of missing values in numeric features.
Ignored when ``imputation_type`` is set to 'simple'.
date_features: list of str, default = None
If the inferred data types are not correct or the silent param is set to True,
date_features param can be used to overwrite or define the data types. It takes
a list of strings with column names that are DateTime.
ignore_features: list of str, default = None
ignore_features param can be used to ignore features during model training.
It takes a list of strings with column names that are to be ignored.
normalize: bool, default = False
When set to True, it transforms the numeric features by scaling them to a given
range. Type of scaling is defined by the ``normalize_method`` parameter.
normalize_method: str, default = 'zscore'
Defines the method for scaling. By default, normalize method is set to 'zscore'
The standard zscore is calculated as z = (x - u) / s. Ignored when ``normalize``
is not True. The other options are:
- minmax: scales and translates each feature individually such that it is in
the range of 0 - 1.
- maxabs: scales and translates each feature individually such that the
maximal absolute value of each feature will be 1.0. It does not
shift/center the data, and thus does not destroy any sparsity.
- robust: scales and translates each feature according to the Interquartile
range. When the dataset contains outliers, robust scaler often gives
better results.
transformation: bool, default = False
When set to True, it applies the power transform to make data more Gaussian-like.
Type of transformation is defined by the ``transformation_method`` parameter.
transformation_method: str, default = 'yeo-johnson'
Defines the method for transformation. By default, the transformation method is
set to 'yeo-johnson'. The other available option for transformation is 'quantile'.
Ignored when ``transformation`` is not True.
handle_unknown_categorical: bool, default = True
When set to True, unknown categorical levels in unseen data are replaced by the
most or least frequent level as learned in the training dataset.
unknown_categorical_method: str, default = 'least_frequent'
Method used to replace unknown categorical levels in unseen data. Method can be
set to 'least_frequent' or 'most_frequent'.
pca: bool, default = False
When set to True, dimensionality reduction is applied to project the data into
a lower dimensional space using the method defined in ``pca_method`` parameter.
pca_method: str, default = 'linear'
        The 'linear' method uses Singular Value Decomposition. Other options are:
- kernel: dimensionality reduction through the use of RBF kernel.
- incremental: replacement for 'linear' pca when the dataset is too large.
pca_components: int or float, default = None
Number of components to keep. if pca_components is a float, it is treated as a
target percentage for information retention. When pca_components is an integer
it is treated as the number of features to be kept. pca_components must be less
than the original number of features. Ignored when ``pca`` is not True.
ignore_low_variance: bool, default = False
When set to True, all categorical features with insignificant variances are
removed from the data. The variance is calculated using the ratio of unique
values to the number of samples, and the ratio of the most common value to the
frequency of the second most common value.
combine_rare_levels: bool, default = False
When set to True, frequency percentile for levels in categorical features below
a certain threshold is combined into a single level.
rare_level_threshold: float, default = 0.1
Percentile distribution below which rare categories are combined. Ignored when
``combine_rare_levels`` is not True.
bin_numeric_features: list of str, default = None
To convert numeric features into categorical, bin_numeric_features parameter can
be used. It takes a list of strings with column names to be discretized. It does
so by using 'sturges' rule to determine the number of clusters and then apply
KMeans algorithm. Original values of the feature are then replaced by the
cluster label.
remove_outliers: bool, default = False
When set to True, outliers from the training data are removed using the Singular
Value Decomposition.
outliers_threshold: float, default = 0.05
The percentage outliers to be removed from the training dataset. Ignored when
``remove_outliers`` is not True.
remove_multicollinearity: bool, default = False
When set to True, features with the inter-correlations higher than the defined
threshold are removed. When two features are highly correlated with each other,
the feature that is less correlated with the target variable is removed. Only
considers numeric features.
multicollinearity_threshold: float, default = 0.9
Threshold for correlated features. Ignored when ``remove_multicollinearity``
is not True.
remove_perfect_collinearity: bool, default = True
When set to True, perfect collinearity (features with correlation = 1) is removed
from the dataset, when two features are 100% correlated, one of it is randomly
removed from the dataset.
create_clusters: bool, default = False
When set to True, an additional feature is created in training dataset where each
instance is assigned to a cluster. The number of clusters is determined by
optimizing Calinski-Harabasz and Silhouette criterion.
cluster_iter: int, default = 20
Number of iterations for creating cluster. Each iteration represents cluster
size. Ignored when ``create_clusters`` is not True.
polynomial_features: bool, default = False
When set to True, new features are derived using existing numeric features.
polynomial_degree: int, default = 2
Degree of polynomial features. For example, if an input sample is two dimensional
and of the form [a, b], the polynomial features with degree = 2 are:
[1, a, b, a^2, ab, b^2]. Ignored when ``polynomial_features`` is not True.
trigonometry_features: bool, default = False
When set to True, new features are derived using existing numeric features.
polynomial_threshold: float, default = 0.1
When ``polynomial_features`` or ``trigonometry_features`` is True, new features
are derived from the existing numeric features. This may sometimes result in too
large feature space. polynomial_threshold parameter can be used to deal with this
problem. It does so by using combination of Random Forest, AdaBoost and Linear
correlation. All derived features that falls within the percentile distribution
are kept and rest of the features are removed.
group_features: list or list of list, default = None
When the dataset contains features with related characteristics, group_features
parameter can be used for feature extraction. It takes a list of strings with
column names that are related.
group_names: list, default = None
Group names to be used in naming new features. When the length of group_names
does not match with the length of ``group_features``, new features are named
sequentially group_1, group_2, etc. It is ignored when ``group_features`` is
None.
feature_selection: bool, default = False
When set to True, a subset of features are selected using a combination of
various permutation importance techniques including Random Forest, Adaboost
and Linear correlation with target variable. The size of the subset is
dependent on the ``feature_selection_threshold`` parameter.
feature_selection_threshold: float, default = 0.8
Threshold value used for feature selection. When ``polynomial_features`` or
``feature_interaction`` is True, it is recommended to keep the threshold low
to avoid large feature spaces. Setting a very low value may be efficient but
could result in under-fitting.
feature_selection_method: str, default = 'classic'
Algorithm for feature selection. 'classic' method uses permutation feature
importance techniques. Other possible value is 'boruta' which uses boruta
algorithm for feature selection.
feature_interaction: bool, default = False
When set to True, new features are created by interacting (a * b) all the
numeric variables in the dataset. This feature is not scalable and may not
work as expected on datasets with large feature space.
feature_ratio: bool, default = False
When set to True, new features are created by calculating the ratios (a / b)
between all numeric variables in the dataset. This feature is not scalable and
may not work as expected on datasets with large feature space.
interaction_threshold: bool, default = 0.01
Similar to polynomial_threshold, It is used to compress a sparse matrix of newly
created features through interaction. Features whose importance based on the
combination of Random Forest, AdaBoost and Linear correlation falls within the
percentile of the defined threshold are kept in the dataset. Remaining features
are dropped before further processing.
transform_target: bool, default = False
When set to True, target variable is transformed using the method defined in
``transform_target_method`` param. Target transformation is applied separately
from feature transformations.
transform_target_method: str, default = 'box-cox'
'Box-cox' and 'yeo-johnson' methods are supported. Box-Cox requires input data to
be strictly positive, while Yeo-Johnson supports both positive or negative data.
When transform_target_method is 'box-cox' and target variable contains negative
values, method is internally forced to 'yeo-johnson' to avoid exceptions.
data_split_shuffle: bool, default = True
When set to False, prevents shuffling of rows during 'train_test_split'.
data_split_stratify: bool or list, default = False
Controls stratification during 'train_test_split'. When set to True, will
stratify by target column. To stratify on any other columns, pass a list of
column names. Ignored when ``data_split_shuffle`` is False.
fold_strategy: str or sklearn CV generator object, default = 'kfold'
Choice of cross validation strategy. Possible values are:
* 'kfold'
* 'stratifiedkfold'
* 'groupkfold'
* 'timeseries'
* a custom CV generator object compatible with scikit-learn.
fold: int, default = 10
Number of folds to be used in cross validation. Must be at least 2. This is
a global setting that can be over-written at function level by using ``fold``
parameter. Ignored when ``fold_strategy`` is a custom object.
fold_shuffle: bool, default = False
Controls the shuffle parameter of CV. Only applicable when ``fold_strategy``
is 'kfold' or 'stratifiedkfold'. Ignored when ``fold_strategy`` is a custom
object.
fold_groups: str or array-like, with shape (n_samples,), default = None
Optional group labels when 'GroupKFold' is used for the cross validation.
It takes an array with shape (n_samples, ) where n_samples is the number
of rows in the training dataset. When string is passed, it is interpreted
as the column name in the dataset containing group labels.
n_jobs: int, default = -1
The number of jobs to run in parallel (for functions that supports parallel
processing) -1 means using all processors. To run all functions on single
processor set n_jobs to None.
use_gpu: bool or str, default = False
When set to True, it will use GPU for training with algorithms that support it,
and fall back to CPU if they are unavailable. When set to 'force', it will only
use GPU-enabled algorithms and raise exceptions when they are unavailable. When
False, all algorithms are trained using CPU only.
GPU enabled algorithms:
- Extreme Gradient Boosting, requires no further installation
- CatBoost Regressor, requires no further installation
(GPU is only enabled when data > 50,000 rows)
- Light Gradient Boosting Machine, requires GPU installation
https://lightgbm.readthedocs.io/en/latest/GPU-Tutorial.html
- Linear Regression, Lasso Regression, Ridge Regression, K Neighbors Regressor,
Random Forest, Support Vector Regression, Elastic Net requires cuML >= 0.15
https://github.com/rapidsai/cuml
custom_pipeline: (str, transformer) or list of (str, transformer), default = None
When passed, will append the custom transformers in the preprocessing pipeline
and are applied on each CV fold separately and on the final fit. All the custom
transformations are applied after 'train_test_split' and before pycaret's internal
transformations.
html: bool, default = True
When set to False, prevents runtime display of monitor. This must be set to False
when the environment does not support IPython. For example, command line terminal,
Databricks Notebook, Spyder and other similar IDEs.
session_id: int, default = None
Controls the randomness of experiment. It is equivalent to 'random_state' in
scikit-learn. When None, a pseudo random number is generated. This can be used
for later reproducibility of the entire experiment.
log_experiment: bool, default = False
When set to True, all metrics and parameters are logged on the ``MLFlow`` server.
experiment_name: str, default = None
Name of the experiment for logging. Ignored when ``log_experiment`` is not True.
experiment_custom_tags: dict, default = None
Dictionary of tag_name: String -> value: (String, but will be string-ified
if not) passed to the mlflow.set_tags to add new custom tags for the experiment.
log_plots: bool or list, default = False
When set to True, certain plots are logged automatically in the ``MLFlow`` server.
To change the type of plots to be logged, pass a list containing plot IDs. Refer
to documentation of ``plot_model``. Ignored when ``log_experiment`` is not True.
log_profile: bool, default = False
When set to True, data profile is logged on the ``MLflow`` server as a html file.
Ignored when ``log_experiment`` is not True.
log_data: bool, default = False
When set to True, dataset is logged on the ``MLflow`` server as a csv file.
Ignored when ``log_experiment`` is not True.
silent: bool, default = False
Controls the confirmation input of data types when ``setup`` is executed. When
executing in completely automated mode or on a remote kernel, this must be True.
verbose: bool, default = True
When set to False, Information grid is not printed.
profile: bool, default = False
When set to True, an interactive EDA report is displayed.
profile_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the ProfileReport method used
to create the EDA report. Ignored if ``profile`` is False.
Returns:
Global variables that can be changed using the ``set_config`` function.
"""
global _pycaret_setup_call
_pycaret_setup_call = dict(func=setup, params=locals())
if not isinstance(data, pd.DataFrame):
data = data()
available_plots = {
"parameter": "Hyperparameters",
"residuals": "Residuals",
"error": "Prediction Error",
"cooks": "Cooks Distance",
"rfe": "Feature Selection",
"learning": "Learning Curve",
"manifold": "Manifold Learning",
"vc": "Validation Curve",
"feature": "Feature Importance",
"feature_all": "Feature Importance (All)",
"tree": "Decision Tree",
"residuals_interactive": "Interactive Residuals",
}
if log_plots == True:
log_plots = ["residuals", "error", "feature"]
return pycaret.internal.tabular.setup(
ml_usecase="regression",
available_plots=available_plots,
data=data,
target=target,
train_size=train_size,
test_data=test_data,
preprocess=preprocess,
imputation_type=imputation_type,
iterative_imputation_iters=iterative_imputation_iters,
categorical_features=categorical_features,
categorical_imputation=categorical_imputation,
categorical_iterative_imputer=categorical_iterative_imputer,
ordinal_features=ordinal_features,
high_cardinality_features=high_cardinality_features,
high_cardinality_method=high_cardinality_method,
numeric_features=numeric_features,
numeric_imputation=numeric_imputation,
numeric_iterative_imputer=numeric_iterative_imputer,
date_features=date_features,
ignore_features=ignore_features,
normalize=normalize,
normalize_method=normalize_method,
transformation=transformation,
transformation_method=transformation_method,
handle_unknown_categorical=handle_unknown_categorical,
unknown_categorical_method=unknown_categorical_method,
pca=pca,
pca_method=pca_method,
pca_components=pca_components,
ignore_low_variance=ignore_low_variance,
combine_rare_levels=combine_rare_levels,
rare_level_threshold=rare_level_threshold,
bin_numeric_features=bin_numeric_features,
remove_outliers=remove_outliers,
outliers_threshold=outliers_threshold,
remove_multicollinearity=remove_multicollinearity,
multicollinearity_threshold=multicollinearity_threshold,
remove_perfect_collinearity=remove_perfect_collinearity,
create_clusters=create_clusters,
cluster_iter=cluster_iter,
polynomial_features=polynomial_features,
polynomial_degree=polynomial_degree,
trigonometry_features=trigonometry_features,
polynomial_threshold=polynomial_threshold,
group_features=group_features,
group_names=group_names,
feature_selection=feature_selection,
feature_selection_threshold=feature_selection_threshold,
feature_selection_method=feature_selection_method,
feature_interaction=feature_interaction,
feature_ratio=feature_ratio,
interaction_threshold=interaction_threshold,
transform_target=transform_target,
transform_target_method=transform_target_method,
data_split_shuffle=data_split_shuffle,
data_split_stratify=data_split_stratify,
fold_strategy=fold_strategy,
fold=fold,
fold_shuffle=fold_shuffle,
fold_groups=fold_groups,
n_jobs=n_jobs,
use_gpu=use_gpu,
custom_pipeline=custom_pipeline,
html=html,
session_id=session_id,
log_experiment=log_experiment,
experiment_name=experiment_name,
experiment_custom_tags=experiment_custom_tags,
log_plots=log_plots,
log_profile=log_profile,
log_data=log_data,
silent=silent,
verbose=verbose,
profile=profile,
profile_kwargs=profile_kwargs,
)
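# A minimal sketch of the "data as a callable" pattern described in the setup
# docstring above; the CSV path is an illustrative assumption. Kept commented
# out so the module still imports cleanly:
#
#   exp = setup(data=lambda: pd.read_csv("train.csv"), target="medv")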
def compare_models(
include: Optional[List[Union[str, Any]]] = None,
exclude: Optional[List[str]] = None,
fold: Optional[Union[int, Any]] = None,
round: int = 4,
cross_validation: bool = True,
sort: str = "R2",
n_select: int = 1,
budget_time: Optional[float] = None,
turbo: bool = True,
errors: str = "ignore",
fit_kwargs: Optional[dict] = None,
groups: Optional[Union[str, Any]] = None,
experiment_custom_tags: Optional[Dict[str, Any]] = None,
verbose: bool = True,
display: Optional[Display] = None,
parallel: Optional[ParallelBackend] = None,
):
"""
This function trains and evaluates performance of all estimators available in the
model library using cross validation. The output of this function is a score grid
with average cross validated scores. Metrics evaluated during CV can be accessed
using the ``get_metrics`` function. Custom metrics can be added or removed using
``add_metric`` and ``remove_metric`` function.
Example
--------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> best_model = compare_models()
include: list of str or scikit-learn compatible object, default = None
To train and evaluate select models, list containing model ID or scikit-learn
compatible object can be passed in include param. To see a list of all models
available in the model library use the ``models`` function.
exclude: list of str, default = None
To omit certain models from training and evaluation, pass a list containing
model id in the exclude parameter. To see a list of all models available
in the model library use the ``models`` function.
fold: int or scikit-learn compatible CV generator, default = None
Controls cross-validation. If None, the CV generator in the ``fold_strategy``
parameter of the ``setup`` function is used. When an integer is passed,
it is interpreted as the 'n_splits' parameter of the CV generator in the
``setup`` function.
round: int, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
cross_validation: bool, default = True
When set to False, metrics are evaluated on holdout set. ``fold`` param
is ignored when cross_validation is set to False.
sort: str, default = 'R2'
The sort order of the score grid. It also accepts custom metrics that are
added through the ``add_metric`` function.
n_select: int, default = 1
Number of top_n models to return. For example, to select top 3 models use
n_select = 3.
budget_time: int or float, default = None
If not None, will terminate execution of the function after budget_time
minutes have passed and return results up to that point.
turbo: bool, default = True
When set to True, it excludes estimators with longer training times. To
see which algorithms are excluded use the ``models`` function.
errors: str, default = 'ignore'
When set to 'ignore', will skip the model with exceptions and continue.
If 'raise', will break the function when exceptions are raised.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the model.
display: pycaret.internal.Display.Display, default = None
Custom display object
parallel: pycaret.parallel.parallel_backend.ParallelBackend, default = None
A ParallelBackend instance. For example if you have a SparkSession ``session``,
you can use ``FugueBackend(session)`` to make this function running using
Spark. For more details, see
:class:`~pycaret.parallel.fugue_backend.FugueBackend`
groups: str or array-like, with shape (n_samples,), default = None
Optional group labels when 'GroupKFold' is used for the cross validation.
It takes an array with shape (n_samples, ) where n_samples is the number
of rows in the training dataset. When string is passed, it is interpreted
as the column name in the dataset containing group labels.
experiment_custom_tags: dict, default = None
Dictionary of tag_name: String -> value: (String, but will be string-ified
if not) passed to the mlflow.set_tags to add new custom tags for the experiment.
verbose: bool, default = True
Score grid is not printed when verbose is set to False.
Returns:
Trained model or list of trained models, depending on the ``n_select`` param.
Warnings
--------
- Changing turbo parameter to False may result in very high training times with
datasets exceeding 10,000 rows.
- No models are logged in ``MLFlow`` when ``cross_validation`` parameter is False.
"""
params = dict(locals())
if parallel is not None:
global _pycaret_setup_call
parallel.attach(_pycaret_setup_call["func"], _pycaret_setup_call["params"])
if params.get("include", None) is None:
_models = models()
if turbo:
_models = _models[_models.Turbo]
params["include"] = _models.index.tolist()
del params["parallel"]
return parallel.compare_models(compare_models, params)
return pycaret.internal.tabular.compare_models(
include=include,
exclude=exclude,
fold=fold,
round=round,
cross_validation=cross_validation,
sort=sort,
n_select=n_select,
budget_time=budget_time,
turbo=turbo,
errors=errors,
fit_kwargs=fit_kwargs,
groups=groups,
experiment_custom_tags=experiment_custom_tags,
verbose=verbose,
display=display,
)
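# A minimal distributed-usage sketch for the ``parallel`` parameter above,
# assuming a live SparkSession named ``spark`` (see the docstring's reference to
# pycaret.parallel.fugue_backend.FugueBackend). Kept commented out so the module
# still imports cleanly:
#
#   from pycaret.parallel.fugue_backend import FugueBackend
#   top3 = compare_models(n_select=3, parallel=FugueBackend(spark))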
def create_model(
estimator: Union[str, Any],
fold: Optional[Union[int, Any]] = None,
round: int = 4,
cross_validation: bool = True,
fit_kwargs: Optional[dict] = None,
groups: Optional[Union[str, Any]] = None,
verbose: bool = True,
**kwargs,
):
"""
This function trains and evaluates the performance of a given estimator
using cross validation. The output of this function is a score grid with
CV scores by fold. Metrics evaluated during CV can be accessed using the
``get_metrics`` function. Custom metrics can be added or removed using
``add_metric`` and ``remove_metric`` function. All the available models
can be accessed using the ``models`` function.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
estimator: str or scikit-learn compatible object
ID of an estimator available in model library or pass an untrained
model object consistent with scikit-learn API. Estimators available
in the model library (ID - Name):
* 'lr' - Linear Regression
* 'lasso' - Lasso Regression
* 'ridge' - Ridge Regression
* 'en' - Elastic Net
* 'lar' - Least Angle Regression
* 'llar' - Lasso Least Angle Regression
* 'omp' - Orthogonal Matching Pursuit
* 'br' - Bayesian Ridge
* 'ard' - Automatic Relevance Determination
* 'par' - Passive Aggressive Regressor
* 'ransac' - Random Sample Consensus
* 'tr' - TheilSen Regressor
* 'huber' - Huber Regressor
* 'kr' - Kernel Ridge
* 'svm' - Support Vector Regression
* 'knn' - K Neighbors Regressor
* 'dt' - Decision Tree Regressor
* 'rf' - Random Forest Regressor
* 'et' - Extra Trees Regressor
* 'ada' - AdaBoost Regressor
* 'gbr' - Gradient Boosting Regressor
* 'mlp' - MLP Regressor
* 'xgboost' - Extreme Gradient Boosting
* 'lightgbm' - Light Gradient Boosting Machine
* 'catboost' - CatBoost Regressor
fold: int or scikit-learn compatible CV generator, default = None
Controls cross-validation. If None, the CV generator in the ``fold_strategy``
parameter of the ``setup`` function is used. When an integer is passed,
it is interpreted as the 'n_splits' parameter of the CV generator in the
``setup`` function.
round: int, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
cross_validation: bool, default = True
When set to False, metrics are evaluated on holdout set. ``fold`` param
is ignored when cross_validation is set to False.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the model.
groups: str or array-like, with shape (n_samples,), default = None
Optional group labels when GroupKFold is used for the cross validation.
It takes an array with shape (n_samples, ) where n_samples is the number
of rows in training dataset. When string is passed, it is interpreted as
the column name in the dataset containing group labels.
verbose: bool, default = True
Score grid is not printed when verbose is set to False.
**kwargs:
Additional keyword arguments to pass to the estimator.
Returns:
Trained Model
Warnings
--------
- Models are not logged on the ``MLFlow`` server when ``cross_validation`` param
is set to False.
"""
return pycaret.internal.tabular.create_model_supervised(
estimator=estimator,
fold=fold,
round=round,
cross_validation=cross_validation,
fit_kwargs=fit_kwargs,
groups=groups,
verbose=verbose,
**kwargs,
)
def tune_model(
estimator,
fold: Optional[Union[int, Any]] = None,
round: int = 4,
n_iter: int = 10,
custom_grid: Optional[Union[Dict[str, list], Any]] = None,
optimize: str = "R2",
custom_scorer=None,
search_library: str = "scikit-learn",
search_algorithm: Optional[str] = None,
early_stopping: Any = False,
early_stopping_max_iters: int = 10,
choose_better: bool = False,
fit_kwargs: Optional[dict] = None,
groups: Optional[Union[str, Any]] = None,
return_tuner: bool = False,
verbose: bool = True,
tuner_verbose: Union[int, bool] = True,
**kwargs,
):
"""
This function tunes the hyperparameters of a given estimator. The output of
this function is a score grid with CV scores by fold of the best selected
model based on ``optimize`` parameter. Metrics evaluated during CV can be
accessed using the ``get_metrics`` function. Custom metrics can be added
or removed using ``add_metric`` and ``remove_metric`` function.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
>>> tuned_lr = tune_model(lr)
estimator: scikit-learn compatible object
Trained model object
fold: int or scikit-learn compatible CV generator, default = None
Controls cross-validation. If None, the CV generator in the ``fold_strategy``
parameter of the ``setup`` function is used. When an integer is passed,
it is interpreted as the 'n_splits' parameter of the CV generator in the
``setup`` function.
round: int, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
n_iter: int, default = 10
Number of iterations in the grid search. Increasing 'n_iter' may improve
model performance but also increases the training time.
custom_grid: dictionary, default = None
To define custom search space for hyperparameters, pass a dictionary with
parameter name and values to be iterated. Custom grids must be in a format
supported by the defined ``search_library``.
optimize: str, default = 'R2'
Metric name to be evaluated for hyperparameter tuning. It also accepts custom
metrics that are added through the ``add_metric`` function.
custom_scorer: object, default = None
        Custom scoring strategy that can be passed to tune the hyperparameters of the
        model. It must be created using ``sklearn.make_scorer``. It is equivalent to
        adding a custom metric using the ``add_metric`` function and passing the name
        of the custom metric in the ``optimize`` parameter.
        Will be deprecated in a future release.
search_library: str, default = 'scikit-learn'
The search library used for tuning hyperparameters. Possible values:
- 'scikit-learn' - default, requires no further installation
https://github.com/scikit-learn/scikit-learn
- 'scikit-optimize' - ``pip install scikit-optimize``
https://scikit-optimize.github.io/stable/
- 'tune-sklearn' - ``pip install tune-sklearn ray[tune]``
https://github.com/ray-project/tune-sklearn
- 'optuna' - ``pip install optuna``
https://optuna.org/
search_algorithm: str, default = None
The search algorithm depends on the ``search_library`` parameter.
Some search algorithms require additional libraries to be installed.
If None, will use search library-specific default algorithm.
- 'scikit-learn' possible values:
- 'random' : random grid search (default)
- 'grid' : grid search
- 'scikit-optimize' possible values:
- 'bayesian' : Bayesian search (default)
- 'tune-sklearn' possible values:
- 'random' : random grid search (default)
- 'grid' : grid search
- 'bayesian' : ``pip install scikit-optimize``
- 'hyperopt' : ``pip install hyperopt``
- 'optuna' : ``pip install optuna``
- 'bohb' : ``pip install hpbandster ConfigSpace``
- 'optuna' possible values:
- 'random' : randomized search
- 'tpe' : Tree-structured Parzen Estimator search (default)
early_stopping: bool or str or object, default = False
Use early stopping to stop fitting to a hyperparameter configuration
if it performs poorly. Ignored when ``search_library`` is scikit-learn,
or if the estimator does not have 'partial_fit' attribute. If False or
None, early stopping will not be used. Can be either an object accepted
by the search library or one of the following:
- 'asha' for Asynchronous Successive Halving Algorithm
- 'hyperband' for Hyperband
- 'median' for Median Stopping Rule
- If False or None, early stopping will not be used.
early_stopping_max_iters: int, default = 10
Maximum number of epochs to run for each sampled configuration.
Ignored if ``early_stopping`` is False or None.
choose_better: bool, default = False
When set to True, the returned object is always better performing. The
metric used for comparison is defined by the ``optimize`` parameter.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the tuner.
groups: str or array-like, with shape (n_samples,), default = None
Optional group labels when GroupKFold is used for the cross validation.
It takes an array with shape (n_samples, ) where n_samples is the number
of rows in training dataset. When string is passed, it is interpreted as
the column name in the dataset containing group labels.
return_tuner: bool, default = False
When set to True, will return a tuple of (model, tuner_object).
verbose: bool, default = True
Score grid is not printed when verbose is set to False.
    tuner_verbose: bool or int, default = True
If True or above 0, will print messages from the tuner. Higher values
print more messages. Ignored when ``verbose`` param is False.
**kwargs:
Additional keyword arguments to pass to the optimizer.
Returns:
Trained Model and Optional Tuner Object when ``return_tuner`` is True.
Warnings
--------
- Using 'grid' as ``search_algorithm`` may result in very long computation.
Only recommended with smaller search spaces that can be defined in the
``custom_grid`` parameter.
- ``search_library`` 'tune-sklearn' does not support GPU models.
"""
return pycaret.internal.tabular.tune_model_supervised(
estimator=estimator,
fold=fold,
round=round,
n_iter=n_iter,
custom_grid=custom_grid,
optimize=optimize,
custom_scorer=custom_scorer,
search_library=search_library,
search_algorithm=search_algorithm,
early_stopping=early_stopping,
early_stopping_max_iters=early_stopping_max_iters,
choose_better=choose_better,
fit_kwargs=fit_kwargs,
groups=groups,
return_tuner=return_tuner,
verbose=verbose,
tuner_verbose=tuner_verbose,
**kwargs,
)
def ensemble_model(
estimator,
method: str = "Bagging",
fold: Optional[Union[int, Any]] = None,
n_estimators: int = 10,
round: int = 4,
choose_better: bool = False,
optimize: str = "R2",
fit_kwargs: Optional[dict] = None,
groups: Optional[Union[str, Any]] = None,
verbose: bool = True,
) -> Any:
"""
This function ensembles a given estimator. The output of this function is
a score grid with CV scores by fold. Metrics evaluated during CV can be
accessed using the ``get_metrics`` function. Custom metrics can be added
or removed using ``add_metric`` and ``remove_metric`` function.
Example
--------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> dt = create_model('dt')
>>> bagged_dt = ensemble_model(dt, method = 'Bagging')
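    >>> # Added sketch: boosting instead of bagging, with more base estimators.
    >>> # 'Boosting' wraps the estimator in an AdaBoost-style ensemble.
    >>> boosted_dt = ensemble_model(dt, method = 'Boosting', n_estimators = 50)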
estimator: scikit-learn compatible object
Trained model object
method: str, default = 'Bagging'
Method for ensembling base estimator. It can be 'Bagging' or 'Boosting'.
fold: int or scikit-learn compatible CV generator, default = None
Controls cross-validation. If None, the CV generator in the ``fold_strategy``
parameter of the ``setup`` function is used. When an integer is passed,
it is interpreted as the 'n_splits' parameter of the CV generator in the
``setup`` function.
n_estimators: int, default = 10
The number of base estimators in the ensemble. In case of perfect fit, the
learning procedure is stopped early.
round: int, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
choose_better: bool, default = False
When set to True, the returned object is always better performing. The
metric used for comparison is defined by the ``optimize`` parameter.
optimize: str, default = 'R2'
Metric to compare for model selection when ``choose_better`` is True.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the model.
groups: str or array-like, with shape (n_samples,), default = None
Optional group labels when GroupKFold is used for the cross validation.
It takes an array with shape (n_samples, ) where n_samples is the number
of rows in training dataset. When string is passed, it is interpreted as
the column name in the dataset containing group labels.
verbose: bool, default = True
Score grid is not printed when verbose is set to False.
Returns:
Trained Model
"""
return pycaret.internal.tabular.ensemble_model(
estimator=estimator,
method=method,
fold=fold,
n_estimators=n_estimators,
round=round,
choose_better=choose_better,
optimize=optimize,
fit_kwargs=fit_kwargs,
groups=groups,
verbose=verbose,
)
def blend_models(
estimator_list: list,
fold: Optional[Union[int, Any]] = None,
round: int = 4,
choose_better: bool = False,
optimize: str = "R2",
weights: Optional[List[float]] = None,
fit_kwargs: Optional[dict] = None,
groups: Optional[Union[str, Any]] = None,
verbose: bool = True,
):
"""
This function trains a Voting Regressor for select models passed in the
``estimator_list`` param. The output of this function is a score grid with
CV scores by fold. Metrics evaluated during CV can be accessed using the
``get_metrics`` function. Custom metrics can be added or removed using
``add_metric`` and ``remove_metric`` function.
Example
--------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> top3 = compare_models(n_select = 3)
>>> blender = blend_models(top3)
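    >>> # Added sketch: weighted blending. The weights below are arbitrary
    >>> # illustrations and must have one entry per model in the estimator list.
    >>> weighted_blender = blend_models(top3, weights = [0.5, 0.3, 0.2])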
estimator_list: list of scikit-learn compatible objects
List of trained model objects
fold: int or scikit-learn compatible CV generator, default = None
Controls cross-validation. If None, the CV generator in the ``fold_strategy``
parameter of the ``setup`` function is used. When an integer is passed,
it is interpreted as the 'n_splits' parameter of the CV generator in the
``setup`` function.
round: int, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
choose_better: bool, default = False
When set to True, the returned object is always better performing. The
metric used for comparison is defined by the ``optimize`` parameter.
optimize: str, default = 'R2'
Metric to compare for model selection when ``choose_better`` is True.
weights: list, default = None
        Sequence of weights (float or int) to weight the predictions of the
        individual regressors before averaging. Uses uniform weights when None.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the model.
groups: str or array-like, with shape (n_samples,), default = None
Optional group labels when GroupKFold is used for the cross validation.
It takes an array with shape (n_samples, ) where n_samples is the number
of rows in training dataset. When string is passed, it is interpreted as
the column name in the dataset containing group labels.
verbose: bool, default = True
Score grid is not printed when verbose is set to False.
Returns:
Trained Model
"""
return pycaret.internal.tabular.blend_models(
estimator_list=estimator_list,
fold=fold,
round=round,
choose_better=choose_better,
optimize=optimize,
method="auto",
weights=weights,
fit_kwargs=fit_kwargs,
groups=groups,
verbose=verbose,
)
def stack_models(
estimator_list: list,
meta_model=None,
meta_model_fold: Optional[Union[int, Any]] = 5,
fold: Optional[Union[int, Any]] = None,
round: int = 4,
restack: bool = True,
choose_better: bool = False,
optimize: str = "R2",
fit_kwargs: Optional[dict] = None,
groups: Optional[Union[str, Any]] = None,
verbose: bool = True,
):
"""
This function trains a meta model over select estimators passed in
the ``estimator_list`` parameter. The output of this function is a
score grid with CV scores by fold. Metrics evaluated during CV can
be accessed using the ``get_metrics`` function. Custom metrics
can be added or removed using ``add_metric`` and ``remove_metric``
function.
Example
--------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> top3 = compare_models(n_select = 3)
>>> stacker = stack_models(top3)
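    >>> # Added sketch: stacking with an explicit meta model ('ridge' is a
    >>> # built-in model ID) and without restacking the original features.
    >>> stacker_ridge = stack_models(top3, meta_model = create_model('ridge'), restack = False)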
estimator_list: list of scikit-learn compatible objects
List of trained model objects
meta_model: scikit-learn compatible object, default = None
When None, Linear Regression is trained as a meta model.
meta_model_fold: integer or scikit-learn compatible CV generator, default = 5
Controls internal cross-validation. Can be an integer or a scikit-learn
        CV generator. If set to an integer, will use (Stratified)KFold CV with
that many folds. See scikit-learn documentation on Stacking for
more details.
fold: int or scikit-learn compatible CV generator, default = None
Controls cross-validation. If None, the CV generator in the ``fold_strategy``
parameter of the ``setup`` function is used. When an integer is passed,
it is interpreted as the 'n_splits' parameter of the CV generator in the
``setup`` function.
round: int, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
restack: bool, default = True
When set to False, only the predictions of estimators will be used as
training data for the ``meta_model``.
choose_better: bool, default = False
When set to True, the returned object is always better performing. The
metric used for comparison is defined by the ``optimize`` parameter.
optimize: str, default = 'R2'
Metric to compare for model selection when ``choose_better`` is True.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the model.
groups: str or array-like, with shape (n_samples,), default = None
Optional group labels when GroupKFold is used for the cross validation.
It takes an array with shape (n_samples, ) where n_samples is the number
of rows in training dataset. When string is passed, it is interpreted as
the column name in the dataset containing group labels.
verbose: bool, default = True
Score grid is not printed when verbose is set to False.
Returns:
Trained Model
"""
return pycaret.internal.tabular.stack_models(
estimator_list=estimator_list,
meta_model=meta_model,
meta_model_fold=meta_model_fold,
fold=fold,
round=round,
method="auto",
restack=restack,
choose_better=choose_better,
optimize=optimize,
fit_kwargs=fit_kwargs,
groups=groups,
verbose=verbose,
)
def plot_model(
estimator,
plot: str = "residuals",
scale: float = 1,
save: bool = False,
fold: Optional[Union[int, Any]] = None,
fit_kwargs: Optional[dict] = None,
plot_kwargs: Optional[dict] = None,
groups: Optional[Union[str, Any]] = None,
use_train_data: bool = False,
verbose: bool = True,
display_format: Optional[str] = None,
) -> str:
"""
This function analyzes the performance of a trained model on holdout set.
It may require re-training the model in certain cases.
Example
--------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
    >>> plot_model(lr, plot = 'residuals')
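    >>> # Added sketch: a prediction error plot saved to the current working
    >>> # directory instead of being displayed.
    >>> plot_model(lr, plot = 'error', save = True)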
estimator: scikit-learn compatible object
Trained model object
    plot: str, default = 'residuals'
List of available plots (ID - Name):
* 'residuals_interactive' - Interactive Residual plots
* 'residuals' - Residuals Plot
* 'error' - Prediction Error Plot
* 'cooks' - Cooks Distance Plot
* 'rfe' - Recursive Feat. Selection
* 'learning' - Learning Curve
* 'vc' - Validation Curve
* 'manifold' - Manifold Learning
* 'feature' - Feature Importance
* 'feature_all' - Feature Importance (All)
* 'parameter' - Model Hyperparameter
* 'tree' - Decision Tree
scale: float, default = 1
The resolution scale of the figure.
save: bool, default = False
When set to True, plot is saved in the current working directory.
fold: int or scikit-learn compatible CV generator, default = None
Controls cross-validation. If None, the CV generator in the ``fold_strategy``
parameter of the ``setup`` function is used. When an integer is passed,
it is interpreted as the 'n_splits' parameter of the CV generator in the
``setup`` function.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the model.
plot_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the visualizer class.
groups: str or array-like, with shape (n_samples,), default = None
Optional group labels when GroupKFold is used for the cross validation.
It takes an array with shape (n_samples, ) where n_samples is the number
of rows in training dataset. When string is passed, it is interpreted as
the column name in the dataset containing group labels.
use_train_data: bool, default = False
When set to true, train data will be used for plots, instead
of test data.
verbose: bool, default = True
When set to False, progress bar is not displayed.
display_format: str, default = None
To display plots in Streamlit (https://www.streamlit.io/), set this to 'streamlit'.
Currently, not all plots are supported.
Returns:
None
"""
return pycaret.internal.tabular.plot_model(
estimator=estimator,
plot=plot,
scale=scale,
save=save,
fold=fold,
fit_kwargs=fit_kwargs,
plot_kwargs=plot_kwargs,
groups=groups,
verbose=verbose,
use_train_data=use_train_data,
system=True,
display_format=display_format,
)
def evaluate_model(
estimator,
fold: Optional[Union[int, Any]] = None,
fit_kwargs: Optional[dict] = None,
plot_kwargs: Optional[dict] = None,
groups: Optional[Union[str, Any]] = None,
use_train_data: bool = False,
):
"""
This function displays a user interface for analyzing performance of a trained
model. It calls the ``plot_model`` function internally.
Example
--------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
>>> evaluate_model(lr)
estimator: scikit-learn compatible object
Trained model object
fold: int or scikit-learn compatible CV generator, default = None
Controls cross-validation. If None, the CV generator in the ``fold_strategy``
parameter of the ``setup`` function is used. When an integer is passed,
it is interpreted as the 'n_splits' parameter of the CV generator in the
``setup`` function.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the model.
plot_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the visualizer class.
groups: str or array-like, with shape (n_samples,), default = None
Optional group labels when GroupKFold is used for the cross validation.
It takes an array with shape (n_samples, ) where n_samples is the number
of rows in training dataset. When string is passed, it is interpreted as
the column name in the dataset containing group labels.
use_train_data: bool, default = False
When set to true, train data will be used for plots, instead
of test data.
Returns:
None
Warnings
--------
- This function only works in IPython enabled Notebook.
"""
return pycaret.internal.tabular.evaluate_model(
estimator=estimator,
fold=fold,
fit_kwargs=fit_kwargs,
plot_kwargs=plot_kwargs,
groups=groups,
use_train_data=use_train_data,
)
def interpret_model(
estimator,
plot: str = "summary",
feature: Optional[str] = None,
observation: Optional[int] = None,
use_train_data: bool = False,
X_new_sample: Optional[pd.DataFrame] = None,
y_new_sample: Optional[pd.DataFrame] = None, # add for pfi explainer
save: bool = False,
**kwargs,
):
"""
This function analyzes the predictions generated from a trained model. Most plots
in this function are implemented based on the SHAP (SHapley Additive exPlanations).
For more info on this, please see https://shap.readthedocs.io/en/latest/
Example
--------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp = setup(data = boston, target = 'medv')
>>> xgboost = create_model('xgboost')
>>> interpret_model(xgboost)
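    >>> # Added sketch: a SHAP dependence plot for a single feature. 'rm' is
    >>> # assumed to be a column of the boston dataset; SHAP must be installed.
    >>> interpret_model(xgboost, plot = 'correlation', feature = 'rm')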
estimator: scikit-learn compatible object
Trained model object
plot : str, default = 'summary'
        Abbreviation of the type of plot. The current list of supported plots
        is (Plot - Name):
* 'summary' - Summary Plot using SHAP
* 'correlation' - Dependence Plot using SHAP
* 'reason' - Force Plot using SHAP
* 'pdp' - Partial Dependence Plot
* 'msa' - Morris Sensitivity Analysis
* 'pfi' - Permutation Feature Importance
feature: str, default = None
This parameter is only needed when plot = 'correlation' or 'pdp'.
By default feature is set to None which means the first column of the
dataset will be used as a variable. A feature parameter must be passed
to change this.
observation: integer, default = None
This parameter only comes into effect when plot is set to 'reason'. If no
observation number is provided, it will return an analysis of all observations
with the option to select the feature on x and y axes through drop down
interactivity. For analysis at the sample level, an observation parameter must
be passed with the index value of the observation in test / hold-out set.
use_train_data: bool, default = False
When set to true, train data will be used for plots, instead
of test data.
X_new_sample: pd.DataFrame, default = None
Row from an out-of-sample dataframe (neither train nor test data) to be plotted.
The sample must have the same columns as the raw input data, and it is transformed
by the preprocessing pipeline automatically before plotting.
y_new_sample: pd.DataFrame, default = None
Row from an out-of-sample dataframe (neither train nor test data) to be plotted.
The sample must have the same columns as the raw input label data, and it is transformed
by the preprocessing pipeline automatically before plotting.
save: bool, default = False
When set to True, Plot is saved as a 'png' file in current working directory.
**kwargs:
Additional keyword arguments to pass to the plot.
Returns:
None
"""
return pycaret.internal.tabular.interpret_model(
estimator=estimator,
plot=plot,
feature=feature,
observation=observation,
use_train_data=use_train_data,
X_new_sample=X_new_sample,
y_new_sample=y_new_sample,
save=save,
**kwargs,
)
def predict_model(
estimator,
data: Optional[pd.DataFrame] = None,
drift_report: bool = False,
round: int = 4,
verbose: bool = True,
) -> pd.DataFrame:
"""
This function predicts ``Label`` using a trained model. When ``data`` is
None, it predicts label on the holdout set.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
>>> pred_holdout = predict_model(lr)
>>> pred_unseen = predict_model(lr, data = unseen_dataframe)
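    >>> # Added sketch: scoring unseen data while also generating a drift report
    >>> # (requires the evidently library to be installed).
    >>> pred_drift = predict_model(lr, data = unseen_dataframe, drift_report = True)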
estimator: scikit-learn compatible object
Trained model object
data : pandas.DataFrame
Shape (n_samples, n_features). All features used during training
must be available in the unseen dataset.
drift_report: bool, default = False
When set to True, interactive drift report is generated on test set
with the evidently library.
round: int, default = 4
Number of decimal places to round predictions to.
verbose: bool, default = True
When set to False, holdout score grid is not printed.
Returns:
pandas.DataFrame
Warnings
--------
- The behavior of the ``predict_model`` is changed in version 2.1 without backward
compatibility. As such, the pipelines trained using the version (<= 2.0), may not
work for inference with version >= 2.1. You can either retrain your models with a
newer version or downgrade the version for inference.
"""
return pycaret.internal.tabular.predict_model(
estimator=estimator,
data=data,
drift_report=drift_report,
probability_threshold=None,
encoded_labels=True,
round=round,
verbose=verbose,
ml_usecase=MLUsecase.REGRESSION,
)
def finalize_model(
estimator,
fit_kwargs: Optional[dict] = None,
groups: Optional[Union[str, Any]] = None,
model_only: bool = True,
experiment_custom_tags: Optional[Dict[str, Any]] = None,
) -> Any:
"""
This function trains a given estimator on the entire dataset including the
holdout set.
Example
--------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
>>> final_lr = finalize_model(lr)
estimator: scikit-learn compatible object
Trained model object
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the model.
groups: str or array-like, with shape (n_samples,), default = None
Optional group labels when GroupKFold is used for the cross validation.
It takes an array with shape (n_samples, ) where n_samples is the number
of rows in training dataset. When string is passed, it is interpreted as
the column name in the dataset containing group labels.
model_only: bool, default = True
        When set to True, only the model object is re-trained and the
        transformations in the Pipeline are ignored. When set to False, the
        entire pipeline is re-trained.
experiment_custom_tags: dict, default = None
Dictionary of tag_name: String -> value: (String, but will be string-ified if
not) passed to the mlflow.set_tags to add new custom tags for the experiment.
Returns:
Trained Model
"""
return pycaret.internal.tabular.finalize_model(
estimator=estimator,
fit_kwargs=fit_kwargs,
groups=groups,
model_only=model_only,
experiment_custom_tags=experiment_custom_tags,
)
def deploy_model(
model,
model_name: str,
authentication: dict,
platform: str = "aws",
):
"""
This function deploys the transformation pipeline and trained model on cloud.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
>>> # sets appropriate credentials for the platform as environment variables
>>> import os
>>> os.environ["AWS_ACCESS_KEY_ID"] = str("foo")
>>> os.environ["AWS_SECRET_ACCESS_KEY"] = str("bar")
>>> deploy_model(model = lr, model_name = 'lr-for-deployment', platform = 'aws', authentication = {'bucket' : 'S3-bucket-name'})
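    >>> # Added sketch: deploying to GCP instead of AWS. Assumes the
    >>> # GOOGLE_APPLICATION_CREDENTIALS environment variable points to a
    >>> # service account key file, as described in the notes below.
    >>> deploy_model(model = lr, model_name = 'lr-for-deployment', platform = 'gcp', authentication = {'project': 'gcp-project-name', 'bucket': 'gcp-bucket-name'})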
Amazon Web Service (AWS) users:
        To deploy a model on AWS S3 ('aws'), the credentials have to be passed. The easiest way is to use environment
        variables in your local environment. The following information from the IAM portal of your Amazon console account
        is required:
- AWS Access Key ID
- AWS Secret Key Access
More info: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#environment-variables
Google Cloud Platform (GCP) users:
        To deploy a model on Google Cloud Platform ('gcp'), a project must be created
        using the command line or the GCP console. Once the project is created, you must
        create a service account and download the service account key as a JSON file to
        set environment variables in your local environment.
More info: https://cloud.google.com/docs/authentication/production
Microsoft Azure (Azure) users:
        To deploy a model on Microsoft Azure ('azure'), the environment variable for the connection
        string must be set in your local environment. Go to the settings of the storage account on the
        Azure portal to access the required connection string.
- AZURE_STORAGE_CONNECTION_STRING (required as environment variable)
More info: https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python?toc=%2Fpython%2Fazure%2FTOC.json
model: scikit-learn compatible object
Trained model object
model_name: str
Name of model.
authentication: dict
Dictionary of applicable authentication tokens.
When platform = 'aws':
{'bucket' : 'S3-bucket-name', 'path': (optional) folder name under the bucket}
When platform = 'gcp':
{'project': 'gcp-project-name', 'bucket' : 'gcp-bucket-name'}
When platform = 'azure':
{'container': 'azure-container-name'}
platform: str, default = 'aws'
Name of the platform. Currently supported platforms: 'aws', 'gcp' and 'azure'.
Returns:
None
"""
return pycaret.internal.tabular.deploy_model(
model=model,
model_name=model_name,
authentication=authentication,
platform=platform,
)
def save_model(
model, model_name: str, model_only: bool = False, verbose: bool = True, **kwargs
):
"""
This function saves the transformation pipeline and trained model object
into the current working directory as a pickle file for later use.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
>>> save_model(lr, 'saved_lr_model')
model: scikit-learn compatible object
Trained model object
model_name: str
Name of the model.
model_only: bool, default = False
When set to True, only trained model object is saved instead of the
entire pipeline.
**kwargs:
Additional keyword arguments to pass to joblib.dump().
verbose: bool, default = True
Success message is not printed when verbose is set to False.
Returns:
Tuple of the model object and the filename.
"""
return pycaret.internal.tabular.save_model(
model=model,
model_name=model_name,
model_only=model_only,
verbose=verbose,
**kwargs,
)
def load_model(
model_name,
platform: Optional[str] = None,
authentication: Optional[Dict[str, str]] = None,
verbose: bool = True,
):
"""
This function loads a previously saved pipeline.
Example
-------
>>> from pycaret.regression import load_model
>>> saved_lr = load_model('saved_lr_model')
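    >>> # Added sketch: loading a model that was saved on AWS S3 with ``deploy_model``.
    >>> # Assumes AWS credentials are already configured in the environment.
    >>> cloud_lr = load_model('lr-for-deployment', platform = 'aws', authentication = {'bucket' : 'S3-bucket-name'})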
model_name: str
Name of the model.
platform: str, default = None
Name of the cloud platform. Currently supported platforms:
'aws', 'gcp' and 'azure'.
authentication: dict, default = None
dictionary of applicable authentication tokens.
when platform = 'aws':
{'bucket' : 'Name of Bucket on S3', 'path': (optional) folder name under the bucket}
when platform = 'gcp':
{'project': 'gcp-project-name', 'bucket' : 'gcp-bucket-name'}
when platform = 'azure':
{'container': 'azure-container-name'}
verbose: bool, default = True
Success message is not printed when verbose is set to False.
Returns:
Trained Model
"""
return pycaret.internal.tabular.load_model(
model_name=model_name,
platform=platform,
authentication=authentication,
verbose=verbose,
)
def automl(optimize: str = "R2", use_holdout: bool = False) -> Any:
"""
This function returns the best model out of all trained models in
current session based on the ``optimize`` parameter. Metrics
evaluated can be accessed using the ``get_metrics`` function.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> top3 = compare_models(n_select = 3)
>>> tuned_top3 = [tune_model(i) for i in top3]
>>> blender = blend_models(tuned_top3)
>>> stacker = stack_models(tuned_top3)
>>> best_mae_model = automl(optimize = 'MAE')
optimize: str, default = 'R2'
Metric to use for model selection. It also accepts custom metrics
added using the ``add_metric`` function.
use_holdout: bool, default = False
When set to True, metrics are evaluated on holdout set instead of CV.
Returns:
Trained Model
"""
return pycaret.internal.tabular.automl(optimize=optimize, use_holdout=use_holdout)
def pull(pop: bool = False) -> pd.DataFrame:
"""
Returns last printed score grid. Use ``pull`` function after
any training function to store the score grid in pandas.DataFrame.
pop: bool, default = False
If True, will pop (remove) the returned dataframe from the
display container.
Returns:
pandas.DataFrame
"""
return pycaret.internal.tabular.pull(pop=pop)
def models(
type: Optional[str] = None,
internal: bool = False,
raise_errors: bool = True,
) -> pd.DataFrame:
"""
Returns table of models available in the model library.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> all_models = models()
type: str, default = None
- linear : filters and only return linear models
- tree : filters and only return tree based models
- ensemble : filters and only return ensemble models
internal: bool, default = False
When True, will return extra columns and rows used internally.
raise_errors: bool, default = True
When False, will suppress all exceptions, ignoring models
that couldn't be created.
Returns:
pandas.DataFrame
"""
return pycaret.internal.tabular.models(
type=type, internal=internal, raise_errors=raise_errors
)
def get_metrics(
reset: bool = False,
include_custom: bool = True,
raise_errors: bool = True,
) -> pd.DataFrame:
"""
Returns table of available metrics used for CV.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> all_metrics = get_metrics()
reset: bool, default = False
When True, will reset all changes made using the ``add_metric``
and ``remove_metric`` function.
include_custom: bool, default = True
Whether to include user added (custom) metrics or not.
raise_errors: bool, default = True
If False, will suppress all exceptions, ignoring models that
couldn't be created.
Returns:
pandas.DataFrame
"""
return pycaret.internal.tabular.get_metrics(
reset=reset,
include_custom=include_custom,
raise_errors=raise_errors,
)
def add_metric(
id: str,
name: str,
score_func: type,
greater_is_better: bool = True,
**kwargs,
) -> pd.Series:
"""
Adds a custom metric to be used for CV.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> from sklearn.metrics import explained_variance_score
>>> add_metric('evs', 'EVS', explained_variance_score)
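    >>> # Added sketch: registering an error metric, where lower values are better.
    >>> from sklearn.metrics import median_absolute_error
    >>> add_metric('mdae', 'MdAE', median_absolute_error, greater_is_better = False)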
id: str
Unique id for the metric.
name: str
Display name of the metric.
score_func: type
Score function (or loss function) with signature ``score_func(y, y_pred, **kwargs)``.
greater_is_better: bool, default = True
Whether ``score_func`` is higher the better or not.
**kwargs:
Arguments to be passed to score function.
Returns:
pandas.Series
"""
return pycaret.internal.tabular.add_metric(
id=id,
name=name,
score_func=score_func,
target="pred",
greater_is_better=greater_is_better,
**kwargs,
)
def remove_metric(name_or_id: str):
"""
Removes a metric from CV.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
    >>> exp_name = setup(data = boston, target = 'medv')
>>> remove_metric('MAPE')
name_or_id: str
Display name or ID of the metric.
Returns:
None
"""
return pycaret.internal.tabular.remove_metric(name_or_id=name_or_id)
def get_logs(experiment_name: Optional[str] = None, save: bool = False) -> pd.DataFrame:
"""
Returns a table of experiment logs. Only works when ``log_experiment``
is True when initializing the ``setup`` function.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv', log_experiment = True)
>>> best = compare_models()
>>> exp_logs = get_logs()
experiment_name: str, default = None
When None current active run is used.
save: bool, default = False
When set to True, csv file is saved in current working directory.
Returns:
pandas.DataFrame
"""
return pycaret.internal.tabular.get_logs(experiment_name=experiment_name, save=save)
def get_config(variable: str):
"""
This function retrieves the global variables created when initializing the
``setup`` function. Following variables are accessible:
- X: Transformed dataset (X)
- y: Transformed dataset (y)
- X_train: Transformed train dataset (X)
- X_test: Transformed test/holdout dataset (X)
- y_train: Transformed train dataset (y)
- y_test: Transformed test/holdout dataset (y)
- seed: random state set through session_id
- prep_pipe: Transformation pipeline
- fold_shuffle_param: shuffle parameter used in Kfolds
- n_jobs_param: n_jobs parameter used in model training
- html_param: html_param configured through setup
- create_model_container: results grid storage container
- master_model_container: model storage container
- display_container: results display container
- exp_name_log: Name of experiment
- logging_param: log_experiment param
- log_plots_param: log_plots param
- USI: Unique session ID parameter
- fix_imbalance_param: fix_imbalance param
- fix_imbalance_method_param: fix_imbalance_method param
- data_before_preprocess: data before preprocessing
- target_param: name of target variable
- gpu_param: use_gpu param configured through setup
- fold_generator: CV splitter configured in fold_strategy
- fold_param: fold params defined in the setup
- fold_groups_param: fold groups defined in the setup
- stratify_param: stratify parameter defined in the setup
- transform_target_param: transform_target_param in setup
- transform_target_method_param: transform_target_method_param in setup
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> X_train = get_config('X_train')
Returns:
Global variable
"""
return pycaret.internal.tabular.get_config(variable=variable)
def set_config(variable: str, value):
"""
This function resets the global variables. Following variables are
accessible:
- X: Transformed dataset (X)
- y: Transformed dataset (y)
- X_train: Transformed train dataset (X)
- X_test: Transformed test/holdout dataset (X)
- y_train: Transformed train dataset (y)
- y_test: Transformed test/holdout dataset (y)
- seed: random state set through session_id
- prep_pipe: Transformation pipeline
- fold_shuffle_param: shuffle parameter used in Kfolds
- n_jobs_param: n_jobs parameter used in model training
- html_param: html_param configured through setup
- create_model_container: results grid storage container
- master_model_container: model storage container
- display_container: results display container
- exp_name_log: Name of experiment
- logging_param: log_experiment param
- log_plots_param: log_plots param
- USI: Unique session ID parameter
- fix_imbalance_param: fix_imbalance param
- fix_imbalance_method_param: fix_imbalance_method param
- data_before_preprocess: data before preprocessing
- target_param: name of target variable
- gpu_param: use_gpu param configured through setup
- fold_generator: CV splitter configured in fold_strategy
- fold_param: fold params defined in the setup
- fold_groups_param: fold groups defined in the setup
- stratify_param: stratify parameter defined in the setup
- transform_target_param: transform_target_param in setup
- transform_target_method_param: transform_target_method_param in setup
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> set_config('seed', 123)
Returns:
None
"""
return pycaret.internal.tabular.set_config(variable=variable, value=value)
def save_config(file_name: str):
"""
    This function saves all global variables to a pickle file, allowing you to
    resume later without rerunning the ``setup`` function.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> save_config('myvars.pkl')
Returns:
None
"""
return pycaret.internal.tabular.save_config(file_name=file_name)
def load_config(file_name: str):
"""
This function loads global variables from a pickle file into Python
environment.
Example
-------
>>> from pycaret.regression import load_config
>>> load_config('myvars.pkl')
Returns:
Global variables
"""
return pycaret.internal.tabular.load_config(file_name=file_name)
def get_leaderboard(
finalize_models: bool = False,
model_only: bool = False,
fit_kwargs: Optional[dict] = None,
groups: Optional[Union[str, Any]] = None,
verbose: bool = True,
) -> pd.DataFrame:
"""
This function returns the leaderboard of all models trained in the
current setup.
Example
-------
>>> from pycaret.regression import get_leaderboard
>>> leaderboard = get_leaderboard()
finalize_models: bool, default = False
If True, will finalize all models in the 'Model' column.
model_only: bool, default = False
        When set to True, only the model object is returned instead
        of the entire pipeline.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the model.
Ignored if finalize_models is False.
groups: str or array-like, with shape (n_samples,), default = None
Optional group labels when GroupKFold is used for the cross validation.
It takes an array with shape (n_samples, ) where n_samples is the number
of rows in training dataset. When string is passed, it is interpreted as
the column name in the dataset containing group labels.
Ignored if finalize_models is False.
verbose: bool, default = True
Progress bar is not printed when verbose is set to False.
Returns:
pandas.DataFrame
"""
return pycaret.internal.tabular.get_leaderboard(
finalize_models=finalize_models,
model_only=model_only,
fit_kwargs=fit_kwargs,
groups=groups,
verbose=verbose,
)
def dashboard(
estimator, display_format="dash", dashboard_kwargs={}, run_kwargs={}, **kwargs
):
"""
This function generates the interactive dashboard for a trained model. The
dashboard is implemented using ExplainerDashboard (explainerdashboard.readthedocs.io)
Example
-------
>>> from pycaret.datasets import get_data
>>> juice = get_data('juice')
>>> from pycaret.classification import *
>>> exp_name = setup(data = juice, target = 'Purchase')
>>> lr = create_model('lr')
>>> dashboard(lr)
estimator: scikit-learn compatible object
Trained model object
display_format: str, default = 'dash'
Render mode for the dashboard. The default is set to ``dash`` which will
render a dashboard in browser. There are four possible options:
- 'dash' - displays the dashboard in browser
- 'inline' - displays the dashboard in the jupyter notebook cell.
- 'jupyterlab' - displays the dashboard in jupyterlab pane.
- 'external' - displays the dashboard in a separate tab. (use in Colab)
dashboard_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the ``ExplainerDashboard`` class.
run_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the ``run`` method of ``ExplainerDashboard``.
**kwargs:
Additional keyword arguments to pass to the ``ClassifierExplainer`` or
``RegressionExplainer`` class.
Returns:
None
"""
return pycaret.internal.tabular.dashboard(
estimator, display_format, dashboard_kwargs, run_kwargs, **kwargs
)
def convert_model(estimator, language: str = "python") -> str:
"""
This function transpiles trained machine learning models into native
inference script in different programming languages (Python, C, Java,
Go, JavaScript, Visual Basic, C#, PowerShell, R, PHP, Dart, Haskell,
Ruby, F#). This functionality is very useful if you want to deploy models
into environments where you can't install your normal Python stack to
support model inference.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
    >>> lr_java = convert_model(lr, 'java')
estimator: scikit-learn compatible object
Trained model object
language: str, default = 'python'
Language in which inference script to be generated. Following
options are available:
* 'python'
* 'java'
* 'javascript'
* 'c'
* 'c#'
* 'f#'
* 'go'
* 'haskell'
* 'php'
* 'powershell'
* 'r'
* 'ruby'
* 'vb'
* 'dart'
Returns:
str
"""
return pycaret.internal.tabular.convert_model(estimator, language)
def eda(data=None, target: str = None, display_format: str = "bokeh", **kwargs):
"""
    This function generates AutoEDA using the AutoViz library. You must
    install AutoViz separately (``pip install autoviz``) to use this
    function.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> eda(display_format = 'bokeh')
data: pandas.DataFrame
DataFrame with (n_samples, n_features).
target: str
Name of the target column to be passed in as a string.
display_format: str, default = 'bokeh'
        When set to 'bokeh', the plots are interactive. The other option is ``svg`` for static
        plots that are generated using matplotlib and seaborn.
**kwargs:
Additional keyword arguments to pass to the AutoVIZ class.
Returns:
None
"""
return pycaret.internal.tabular.eda(
data=data, target=target, display_format=display_format, **kwargs
)
def check_fairness(estimator, sensitive_features: list, plot_kwargs: dict = {}):
"""
There are many approaches to conceptualizing fairness. This function follows
    the approach known as group fairness, which asks: which groups of individuals
    are at risk of experiencing harms? This function provides fairness-related
    metrics between different groups (also called subpopulations).
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
>>> lr_fairness = check_fairness(lr, sensitive_features = ['chas'])
estimator: scikit-learn compatible object
Trained model object
sensitive_features: list
Sensitive features are relevant groups (also called subpopulations).
You must pass a list of column names that are present in the dataset
as string.
plot_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the matplotlib plot.
Returns:
pandas.DataFrame
"""
return pycaret.internal.tabular.check_fairness(
estimator=estimator,
sensitive_features=sensitive_features,
plot_kwargs=plot_kwargs,
)
def create_api(
estimator, api_name: str, host: str = "127.0.0.1", port: int = 8000
) -> None:
"""
This function takes an input ``estimator`` and creates a POST API for
inference. It only creates the API and doesn't run it automatically.
To run the API, you must run the Python file using ``!python``.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
>>> create_api(lr, 'lr_api')
>>> !python lr_api.py #to run the API
estimator: scikit-learn compatible object
Trained model object
api_name: str
Name of the api as a string.
host: str, default = '127.0.0.1'
API host address.
    port: int, default = 8000
        Port for the API.
Returns:
None
"""
return pycaret.internal.tabular.create_api(
estimator=estimator, api_name=api_name, host=host, port=port
)
def create_docker(
api_name: str, base_image: str = "python:3.8-slim", expose_port: int = 8000
) -> None:
"""
This function creates a ``Dockerfile`` and ``requirements.txt`` for
productionalizing API end-point.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
>>> create_api(lr, 'lr_api')
>>> create_docker('lr_api')
api_name: str
Name of API. Must be saved as a .py file in the same folder.
base_image: str, default = "python:3.8-slim"
Name of the base image for Dockerfile.
    expose_port: int, default = 8000
        Port to expose for the API in the Dockerfile.
Returns:
None
"""
return pycaret.internal.tabular.create_docker(
api_name=api_name, base_image=base_image, expose_port=expose_port
)
def create_app(estimator, app_kwargs: Optional[dict] = None) -> None:
"""
This function creates a basic gradio app for inference.
It will later be expanded for other app types such as
Streamlit.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
>>> create_app(lr)
estimator: scikit-learn compatible object
Trained model object
    app_kwargs: dict, default = {}
        Arguments to be passed to the app class.
Returns:
None
"""
return pycaret.internal.tabular.create_app(estimator=estimator, app_kwargs=app_kwargs) |
the-stack_0_20388 | #===============================================================================
# Copyright 2014-2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# daal4py SVD example for streaming on shared memory systems
import daal4py as d4p
import numpy as np
# let's try to use pandas' fast csv reader
try:
import pandas
def read_csv(f, c, s=0, n=None, t=np.float64):
return pandas.read_csv(f, usecols=c, delimiter=',',
header=None, skiprows=s, nrows=n, dtype=t)
except:
# fall back to numpy genfromtxt
def read_csv(f, c, s=0, n=np.iinfo(np.int64).max):
a = np.genfromtxt(f, usecols=c, delimiter=',', skip_header=s, max_rows=n)
if a.shape[0] == 0:
raise Exception("done")
if a.ndim == 1:
return a[:, np.newaxis]
return a
def main(readcsv=read_csv, method='defaultDense'):
infiles = ["./data/distributed/svd_{}.csv".format(i) for i in range(1, 5)]
# configure a SVD object
algo = d4p.svd(streaming=True)
    # let's provide files directly, not as tables/arrays
# Feed file by file
for infile in infiles:
algo.compute(infile)
# All files are done, now finalize the computation
result = algo.finalize()
# SVD result objects provide leftSingularMatrix,
# rightSingularMatrix and singularValues
return result
if __name__ == "__main__":
result = main()
print("\nSingular values:\n", result.singularValues)
print("\nRight orthogonal matrix V:\n", result.rightSingularMatrix)
print(
"\nLeft orthogonal matrix U (first 10 rows):\n",
result.leftSingularMatrix[0:10]
)
print('All looks good!')
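    # Added sanity check (not part of the original example): the singular
    # vectors returned by the streaming SVD should be orthonormal, so
    # U^T U and V V^T are expected to be close to identity matrices.
    u = result.leftSingularMatrix
    v = result.rightSingularMatrix
    print("U columns orthonormal:", np.allclose(u.T @ u, np.eye(u.shape[1]), atol=1e-6))
    print("V rows orthonormal:", np.allclose(v @ v.T, np.eye(v.shape[0]), atol=1e-6))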
|
the-stack_0_20391 | from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views.generic import TemplateView, ListView
from projectroles.views import (
LoggedInPermissionMixin,
ProjectContextMixin,
ProjectPermissionMixin,
)
from projectroles.plugins import get_backend_api
from .models import BackgroundJob
class ProjectBackgroundJobView(
LoginRequiredMixin,
LoggedInPermissionMixin,
ProjectPermissionMixin,
ProjectContextMixin,
ListView,
):
"""Display all ``BackgroundJob`` records for the project."""
template_name = 'bgjobs/project_backgroundjobs.html'
permission_required = 'bgjobs.view_jobs_own'
model = BackgroundJob
def get_queryset(self):
# TODO: filter to user's job if can only see their own
return (
super()
.get_queryset()
.filter(project__sodar_uuid=self.kwargs['project'])
)
class BackgroundJobClearViewBase(
LoginRequiredMixin,
LoggedInPermissionMixin,
ProjectPermissionMixin,
ProjectContextMixin,
TemplateView,
):
"""Base class for view clearing jobs."""
#: The template is the same for both sub classes.
template_name = 'bgjobs/backgroundjob_confirm_clear.html'
#: Set in sub class.
which_jobs = None
#: Set in sub class.
permission_required = None
def get_context_data(self, *args, **kwargs):
result = super().get_context_data(*args, **kwargs)
result['which_jobs'] = self.which_jobs
return result
def post(self, _request, **_kwargs):
"""Handle form POST."""
context = self.get_context_data()
project = context['project']
filter_kwargs = {'project': project}
if self.which_jobs != 'all':
filter_kwargs['user'] = self.request.user
try:
bg_jobs = BackgroundJob.objects.filter(**filter_kwargs)
bg_job_count = bg_jobs.count()
bg_jobs.delete()
timeline = get_backend_api('timeline_backend')
if timeline:
timeline.add_event(
project=self.get_project(self.request, self.kwargs),
app_name='bgjobs',
user=self.request.user,
event_name='clear_bg_jobs',
description='Clearing {} background jobs'.format(
'user-owned' if self.which_jobs != 'all' else 'all'
),
status_type='OK',
)
messages.success(
self.request, 'Removed {} background jobs'.format(bg_job_count)
)
except Exception as ex:
messages.error(
self.request, 'Unable to remove background jobs: {}'.format(ex)
)
return HttpResponseRedirect(
reverse('bgjobs:list', kwargs={'project': project.sodar_uuid})
)
class BackgroundJobClearOwnView(BackgroundJobClearViewBase):
"""View for clearing a user's own background job."""
which_jobs = 'own'
permission_required = 'bgjobs.update_bgjob_own'
class BackgroundJobClearAllView(BackgroundJobClearViewBase):
"""View for clearing a background jobs in a project."""
which_jobs = 'all'
permission_required = 'bgjobs.update_bgjob_all'
|
the-stack_0_20392 | #!/usr/bin/env python
#coding=utf-8
"""
.py:
"""
__author__ = "Francisco Maria Calisto"
__maintainer__ = "Francisco Maria Calisto"
__email__ = "[email protected]"
__license__ = "MIT"
__version__ = "1.0.1"
__status__ = "Development"
__copyright__ = "Copyright 2019, Instituto Superior Técnico (IST)"
__credits__ = [
"Bruno Oliveira",
"Carlos Santiago",
"Jacinto C. Nascimento",
"Pedro Miraldo",
"Nuno Nunes"
]
import os
import sys
from os import path
import tobii_research as tr
import time
# The current folder path.
basePath = os.path.dirname(__file__)
# The path to the repository "src" folder.
joinPath = os.path.join(basePath, '..')
pathAbsPath = os.path.abspath(joinPath)
# Add the directory containing the module to
# the Python path (wants absolute paths).
sys.path.append(pathAbsPath)
def find_eyetrackers_meta():
found_eyetrackers = tr.find_all_eyetrackers()
# available_eyetracker = found_eyetrackers[0]
for available_eyetracker in found_eyetrackers:
print("Address: " + available_eyetracker.address)
print("Model: " + available_eyetracker.model)
print("Name (It's OK if this is empty): " + available_eyetracker.device_name)
print("Serial number: " + available_eyetracker.serial_number)
if tr.CAPABILITY_CAN_SET_DISPLAY_AREA in available_eyetracker.device_capabilities:
print("The display area can be set on the eye tracker.")
else:
print("The display area can not be set on the eye tracker.")
if tr.CAPABILITY_HAS_EXTERNAL_SIGNAL in available_eyetracker.device_capabilities:
print("The eye tracker can deliver an external signal stream.")
else:
print("The eye tracker can not deliver an external signal stream.")
if tr.CAPABILITY_HAS_EYE_IMAGES in available_eyetracker.device_capabilities:
print("The eye tracker can deliver an eye image stream.")
else:
print("The eye tracker can not deliver an eye image stream.")
if tr.CAPABILITY_HAS_GAZE_DATA in available_eyetracker.device_capabilities:
print("The eye tracker can deliver a gaze data stream.")
else:
print("The eye tracker can not deliver a gaze data stream.")
if tr.CAPABILITY_HAS_HMD_GAZE_DATA in available_eyetracker.device_capabilities:
print("The eye tracker can deliver a HMD gaze data stream.")
else:
print("The eye tracker can not deliver a HMD gaze data stream.")
if tr.CAPABILITY_CAN_DO_SCREEN_BASED_CALIBRATION in available_eyetracker.device_capabilities:
print("The eye tracker can do a screen based calibration.")
else:
print("The eye tracker can not do a screen based calibration.")
if tr.CAPABILITY_CAN_DO_MONOCULAR_CALIBRATION in available_eyetracker.device_capabilities:
print("The eye tracker can do a monocular calibration.")
else:
print("The eye tracker can not do a monocular calibration.")
if tr.CAPABILITY_CAN_DO_HMD_BASED_CALIBRATION in available_eyetracker.device_capabilities:
print("The eye tracker can do a HMD screen based calibration.")
else:
print("The eye tracker can not do a HMD screen based calibration.")
if tr.CAPABILITY_HAS_HMD_LENS_CONFIG in available_eyetracker.device_capabilities:
print("The eye tracker can get/set the HMD lens configuration.")
else:
print("The eye tracker can not get/set the HMD lens configuration.")
return available_eyetracker
# ==================== END File ==================== #
|
the-stack_0_20393 | # Exercise #1: Refer to Lial Section 11.3 Examples 3-5. Using the function
# der() as defined, calculate approximate slopes for the functions given in
# Lial Section 11.3 Examples 3(b), 4(c), & 5. Use a small value for delta and
# evaluate der() at the points used in the examples. Round to 4 decimal places.
import numpy
from numpy import arange, cos
import matplotlib.pyplot
from matplotlib.pyplot import *
# A general function for calculating the slope between two points: x and
# x+delta. See Lial Section 11.3 dealing with instantaneous rates of change.
def der( x, delta ):
delta = float( delta )
if delta < (0.0000001):
print ('Value chosen for delta is too small.')
return 1 / delta
else:
slope = ( f (x + delta) - f(x) ) / delta
return slope
# Define a function for demonstration. This function may be changed.
def f(x):
f = cos( x )
return f
point = 1.0 #This is a point at which a derivative will be calculated.
# The following statements initialize variables for computation.
number = 510
increment = 10
y = []
x = []
# What follows shows calculations and list manipulations. Recall that a range
# statement is inclusive of the first number and exclusive of the last. In this
# example we are incrementing by units of 10 from 1 to 500. We are reducing
# the distance between x=1.0 and x=1.0+delta by reducing delta. The slopes
# being calculated are stored in the list y.
for k in range( increment, number, increment ):
delta = 1.0/(k+1)
d = der(point,delta)
x = x + [k]
y = y + [d]
max_x = k + increment
limit = der( point,0.000001 )
print( 'Final value equals', limit )
# The plot shows convergence of the slopes to the instantaneous rate of change.
# Black dots mark computed slopes as delta was reduced. The x-axis is plotted
# using values of k from the range statement in the for loop.
figure()
xlim( 0, max_x+50 )
ylim( min( y ) -0.05, max( y ) + 0.05)
scatter( 540, limit, color='g', s=40, label = 'limiting slope')
legend( ( 'limiting slope' ), loc = 'best' )
scatter( x, y, c='k', s=20 )
title ('Example of Convergence to Instanteous Rate of Change')
xlabel('x-axis')
ylabel('y-axis')
plot( x,y )
show()
|
the-stack_0_20395 |
import discord
from discord.ext import commands, tasks
import asyncio
import datetime
import functools
import html
import io
import logging
import re
import sys
import time
import textwrap
import traceback
import urllib
import aiohttp
from bs4 import BeautifulSoup
import dateutil.parser
import dateutil.tz
import feedparser
import pytz
from utilities import checks
errors_logger = logging.getLogger("errors")
def setup(bot):
bot.add_cog(RSS(bot))
class RSS(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Generate tzinfos
self.tzinfos = {}
for timezone_abbreviation in ("EDT", "EST"):
matching_timezones = list(filter(lambda t: datetime.datetime.now(pytz.timezone(t)).strftime("%Z") == timezone_abbreviation, pytz.common_timezones))
matching_utc_offsets = set(datetime.datetime.now(pytz.timezone(t)).strftime("%z") for t in matching_timezones)
if len(matching_utc_offsets) == 1:
self.tzinfos[timezone_abbreviation] = dateutil.tz.gettz(matching_timezones[0])
self.new_feed = asyncio.Event()
self.check_feeds.start().set_name("RSS")
def cog_unload(self):
self.check_feeds.cancel()
async def initialize_database(self):
await self.bot.connect_to_database()
await self.bot.db.execute("CREATE SCHEMA IF NOT EXISTS rss")
await self.bot.db.execute(
"""
CREATE TABLE IF NOT EXISTS rss.feeds (
channel_id BIGINT,
feed TEXT,
last_checked TIMESTAMPTZ,
ttl INT,
PRIMARY KEY (channel_id, feed)
)
"""
)
await self.bot.db.execute(
"""
CREATE TABLE IF NOT EXISTS rss.entries (
entry TEXT,
feed TEXT,
PRIMARY KEY (entry, feed)
)
"""
)
await self.bot.db.execute(
"""
CREATE TABLE IF NOT EXISTS rss.errors (
timestamp TIMESTAMPTZ PRIMARY KEY DEFAULT NOW(),
feed TEXT,
type TEXT,
message TEXT
)
"""
)
@commands.group(aliases = ["feed"], invoke_without_command = True, case_insensitive = True)
@checks.not_forbidden()
async def rss(self, ctx):
'''RSS'''
await ctx.send_help(ctx.command)
@rss.command()
@commands.check_any(checks.is_permitted(), checks.is_guild_owner())
async def add(self, ctx, url: str):
'''Add a feed to a channel'''
following = await ctx.bot.db.fetchval(
"""
SELECT EXISTS (
SELECT FROM rss.feeds
WHERE channel_id = $1 AND feed = $2
)
""",
ctx.channel.id, url
)
if following:
return await ctx.embed_reply(f"{ctx.bot.error_emoji} This text channel is already following that feed")
async with ctx.bot.aiohttp_session.get(url) as resp:
feed_text = await resp.text()
# TODO: Handle issues getting URL
partial = functools.partial(feedparser.parse, io.BytesIO(feed_text.encode("UTF-8")),
response_headers = {"Content-Location": url})
feed_info = await self.bot.loop.run_in_executor(None, partial)
# Still necessary to run in executor?
# TODO: Handle if feed already being followed elsewhere
ttl = None
if "ttl" in feed_info.feed:
ttl = int(feed_info.feed.ttl)
for entry in feed_info.entries:
await ctx.bot.db.execute(
"""
INSERT INTO rss.entries (entry, feed)
VALUES ($1, $2)
ON CONFLICT (entry, feed) DO NOTHING
""",
entry.id, url
)
await ctx.bot.db.execute(
"""
INSERT INTO rss.feeds (channel_id, feed, last_checked, ttl)
VALUES ($1, $2, NOW(), $3)
""",
ctx.channel.id, url, ttl
)
await ctx.embed_reply(f"The feed, {url}, has been added to this channel")
self.new_feed.set()
@rss.command(aliases = ["delete"])
@commands.check_any(checks.is_permitted(), checks.is_guild_owner())
async def remove(self, ctx, url: str):
'''Remove a feed from a channel'''
deleted = await ctx.bot.db.fetchval(
"""
DELETE FROM rss.feeds
WHERE channel_id = $1 AND feed = $2
RETURNING *
""",
ctx.channel.id, url
)
if not deleted:
return await ctx.embed_reply(f"{ctx.bot.error_emoji} This channel isn't following that feed")
await ctx.embed_reply(f"The feed, {url}, has been removed from this channel")
@rss.command(aliases = ["feed"])
@checks.not_forbidden()
async def feeds(self, ctx):
'''Show feeds being followed in this channel'''
records = await ctx.bot.db.fetch("SELECT feed FROM rss.feeds WHERE channel_id = $1", ctx.channel.id)
await ctx.embed_reply('\n'.join(record["feed"] for record in records),
title = "RSS feeds being followed in this channel")
# R/PT60S (ISO 8601 repeating interval: repeat every 60 seconds)
@tasks.loop(seconds = 60)
async def check_feeds(self):
records = await self.bot.db.fetch(
"""
SELECT DISTINCT ON (feed) feed, last_checked, ttl
FROM rss.feeds
ORDER BY feed, last_checked
"""
)
if not records:
self.new_feed.clear()
await self.new_feed.wait()
for record in records:
feed = record["feed"]
if record["ttl"] and datetime.datetime.now(datetime.timezone.utc) < record["last_checked"] + datetime.timedelta(minutes = record["ttl"]):
continue
try:
async with self.bot.aiohttp_session.get(feed) as resp:
feed_text = await resp.text()
feed_info = await self.bot.loop.run_in_executor(None, functools.partial(feedparser.parse, io.BytesIO(feed_text.encode("UTF-8")), response_headers = {"Content-Location": feed}))
# Still necessary to run in executor?
ttl = None
if "ttl" in feed_info.feed:
ttl = int(feed_info.feed.ttl)
await self.bot.db.execute(
"""
UPDATE rss.feeds
SET last_checked = NOW(),
ttl = $1
WHERE feed = $2
""",
ttl, feed
)
for entry in feed_info.entries:
if "id" not in entry:
continue
inserted = await self.bot.db.fetchrow(
"""
INSERT INTO rss.entries (entry, feed)
VALUES ($1, $2)
ON CONFLICT DO NOTHING
RETURNING *
""",
entry.id, feed
)
if not inserted:
continue
# Get timestamp
## if "published_parsed" in entry:
## timestamp = datetime.datetime.fromtimestamp(time.mktime(entry.published_parsed))
### inaccurate
timestamp = discord.Embed.Empty
try:
if "published" in entry and entry.published:
timestamp = dateutil.parser.parse(entry.published, tzinfos = self.tzinfos)
elif "updated" in entry: # and entry.updated necessary?; check updated first?
timestamp = dateutil.parser.parse(entry.updated, tzinfos = self.tzinfos)
except ValueError:
pass
# Get and set description, title, url + set timestamp
if not (description := entry.get("summary")) and "content" in entry:
description = entry["content"][0].get("value")
if description:
description = BeautifulSoup(description, "lxml").get_text(separator = '\n')
description = re.sub(r"\n\s*\n", '\n', description)
if len(description) > self.bot.EMBED_DESCRIPTION_CHARACTER_LIMIT:
space_index = description.rfind(' ', 0, self.bot.EDCL - 3)
# EDCL: Embed Description Character Limit
description = description[:space_index] + "..."
title = textwrap.shorten(entry.get("title"), width = self.bot.ETiCL, placeholder = "...")
# ETiCL: Embed Title Character Limit
embed = discord.Embed(title = html.unescape(title),
url = entry.link,
description = description,
timestamp = timestamp,
color = self.bot.rss_color)
# Get and set thumbnail url
thumbnail_url = (
(media_thumbnail := entry.get("media_thumbnail")) and media_thumbnail[0].get("url") or
(
(media_content := entry.get("media_content")) and
(media_image := discord.utils.find(lambda c: "image" in c.get("medium", ""), media_content)) and
media_image.get("url")
) or
(
(links := entry.get("links")) and
(image_link := discord.utils.find(lambda l: "image" in l.get("type", ""), links)) and
image_link.get("href")
) or
(
(content := entry.get("content")) and (content_value := content[0].get("value")) and
(content_img := getattr(BeautifulSoup(content_value, "lxml"), "img")) and
content_img.get("src")
) or
(
(media_content := entry.get("media_content")) and
(media_content := discord.utils.find(lambda c: "url" in c, media_content)) and
media_content["url"]
) or
(
(description := entry.get("description")) and
(description_img := getattr(BeautifulSoup(description, "lxml"), "img")) and
description_img.get("src")
)
)
if thumbnail_url:
if not urllib.parse.urlparse(thumbnail_url).netloc:
thumbnail_url = feed_info.feed.link + thumbnail_url
embed.set_thumbnail(url = thumbnail_url)
# Get and set footer icon url
footer_icon_url = (
feed_info.feed.get("icon") or feed_info.feed.get("logo") or
(feed_image := feed_info.feed.get("image")) and feed_image.get("href") or
(parsed_image := BeautifulSoup(feed_text, "lxml").image) and next(iter(parsed_image.attrs.values()), None) or
discord.Embed.Empty
)
embed.set_footer(text = feed_info.feed.get("title", feed), icon_url = footer_icon_url)
# Send embed(s)
channel_records = await self.bot.db.fetch("SELECT channel_id FROM rss.feeds WHERE feed = $1", feed)
for record in channel_records:
if text_channel := self.bot.get_channel(record["channel_id"]):
try:
await text_channel.send(embed = embed)
except discord.Forbidden:
pass
except discord.HTTPException as e:
if e.status == 400 and e.code == 50035:
if ("In embed.url: Not a well formed URL." in e.text or # still necessary?
"In embeds.0.url: Not a well formed URL." in e.text or
(("In embed.url: Scheme" in e.text or #still necessary?
"In embeds.0.url: Scheme" in e.text) and
"is not supported. Scheme must be one of ('http', 'https')." in e.text)):
embed.url = discord.Embed.Empty
if ("In embed.thumbnail.url: Not a well formed URL." in e.text or # still necessary?
"In embeds.0.thumbnail.url: Not a well formed URL." in e.text or
(("In embed.thumbnail.url: Scheme" in e.text or # still necessary?
"In embeds.0.thumbnail.url: Scheme" in e.text) and
"is not supported. Scheme must be one of ('http', 'https')." in e.text)):
embed.set_thumbnail(url = "")
if ("In embed.footer.icon_url: Not a well formed URL." in e.text or
("In embed.footer.icon_url: Scheme" in e.text and
"is not supported. Scheme must be one of ('http', 'https')." in e.text)):
embed.set_footer(text = feed_info.feed.title)
await text_channel.send(embed = embed)
else:
raise
# TODO: Remove text channel data if now non-existent
except (aiohttp.ClientConnectionError, aiohttp.ClientPayloadError,
aiohttp.TooManyRedirects, asyncio.TimeoutError,
UnicodeDecodeError) as e:
await self.bot.db.execute(
"""
INSERT INTO rss.errors (feed, type, message)
VALUES ($1, $2, $3)
""",
feed, type(e).__name__, str(e)
)
# Print error?
await asyncio.sleep(10)
# TODO: Add variable for sleep time
# TODO: Remove persistently erroring feed or exponentially backoff?
except discord.DiscordServerError as e:
self.bot.print(f"RSS Task Discord Server Error: {e}")
await asyncio.sleep(60)
except Exception as e:
print("Exception in RSS Task", file = sys.stderr)
traceback.print_exception(type(e), e, e.__traceback__, file = sys.stderr)
errors_logger.error("Uncaught RSS Task exception\n", exc_info = (type(e), e, e.__traceback__))
print(f" (feed: {feed})")
await asyncio.sleep(60)
@check_feeds.before_loop
async def before_check_feeds(self):
await self.initialize_database()
await self.bot.wait_until_ready()
@check_feeds.after_loop
async def after_check_feeds(self):
self.bot.print("RSS task cancelled")
|
the-stack_0_20396 | import pybullet as pb
import numpy as np
from math import pi
from pathlib import Path
from .arm import Arm
from .body import Body
from .chain import Chain
from .camera import Camera
from .arm_control import ArmPositionController
from .universal_robot import UR5Kinematics
from .gripper import RG6Gripper
from .gripper_control import RG6GripperController
from .robotiq_gripper import *
class PRLUR5Robot:
def __init__(self, client_id, with_gripper=True, fixed=True):
flags = (
pb.URDF_USE_INERTIA_FROM_FILE
| pb.URDF_ENABLE_CACHED_GRAPHICS_SHAPES
# | pb.URDF_USE_SELF_COLLISION
# | pb.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS
)
# Robot
self._body = Body.load(
"prl_ur5/robot.urdf",
flags=flags,
useFixedBase=fixed,
client_id=client_id,
)
self.moving_joints_map = {}
for i in range(pb.getNumJoints(self._body.body_id)):
info = pb.getJointInfo(self._body.body_id, i)
joint_name = info[1].decode("utf-8", "strict")
joint_type = info[2]
link_name = info[12].decode("utf-8", "strict")
if joint_type != pb.JOINT_FIXED:
self.moving_joints_map[joint_name] = i
left_arm = Arm(self._body, tip_link_name="left_gripper_grasp_frame")
left_arm.controller = ArmPositionController(left_arm, gains=0.1)
left_arm._kinematics = UR5Kinematics(
left_arm._chain,
prefix="left_",
)
# We want the tip to be the camera frame
right_arm = Arm(self._body, tip_link_name="right_camera_color_optical_frame")
right_arm.controller = ArmPositionController(right_arm, gains=0.1)
right_arm._kinematics = UR5Kinematics(
right_arm._chain,
prefix="right_",
)
gripper = None
if with_gripper:
gripper = RG6Gripper(self._body, prefix="left_")
gripper.controller = RG6GripperController(gripper)
self._arm = left_arm
self._right_arm = right_arm
self._gripper = gripper
self.client_id = client_id
self._wrist_cameras = []
def enable_wrist_camera(self, prefix="right_", width=1280, height=720):
link = self._body.link(f"{prefix}camera_color_optical_frame")
cam = Camera(width, height, self._body.client_id)
cam.attach(link=link, orn=(0, 0, np.pi))
self._wrist_cameras.append(cam)
def attach_wrist_camera(
self, prefix="right_", pos=(0, 0, 0), orn=(0, 0, np.pi), width=1280, height=720
):
link = self._body.link(f"{prefix}camera_color_optical_frame")
cam = Camera(width, height, self._body.client_id)
cam.attach(link=link, pos=pos, orn=orn)
return cam
@property
def arm(self):
return self._arm
@property
def right_arm(self):
return self._right_arm
@property
def gripper(self):
return self._gripper
@property
def wrist_cameras(self):
return self._wrist_cameras
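# Minimal usage sketch (illustrative assumption: a PyBullet client is already available and the
# prl_ur5 URDF assets are on the data path; this block is not part of the original module):
# client_id = pb.connect(pb.DIRECT)
# robot = PRLUR5Robot(client_id, with_gripper=True, fixed=True)
# wrist_cam = robot.attach_wrist_camera(prefix="right_")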
|
the-stack_0_20398 | # Copyright 2021-xx iiPython
# tools/init_db.py
# Initializes the required database files
# Modules
import os
import sqlite3
from rich import print
from prism.config import config
# Check the db folder
print("[yellow]Initializing databases...")
db_dir = config.get(["paths", "db_dir"])
if not os.path.isdir(db_dir):
os.mkdir(db_dir)
# Create the databases
def create_db(name: str, cmd: str) -> None:
conn = sqlite3.connect(os.path.abspath(os.path.join(db_dir, name + ".db")))
cursor = conn.cursor()
cursor.execute(cmd)
conn.commit()
conn.close()
create_db("users", """
CREATE TABLE IF NOT EXISTS users (
userid integer,
balance long,
bio text,
accent text
)
""")
create_db("inventory", """
CREATE TABLE IF NOT EXISTS inventory (
userid integer,
name text,
amount integer
)
""")
create_db("guilds", """
CREATE TABLE IF NOT EXISTS guilds (
id integer,
prefix text
)
""")
create_db("bank", """
CREATE TABLE IF NOT EXISTS bank (
userid integer,
balance long
)
""")
# Finish process
print(" [green]databases initialized.")
|
the-stack_0_20399 | import traceback
from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse
from django.conf import settings
from django.db import transaction
from wsgiref.util import FileWrapper
from rest_framework import viewsets, serializers, status, generics, views
from rest_framework.decorators import detail_route, list_route, renderer_classes, parser_classes
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from rest_framework.permissions import IsAuthenticated, AllowAny, IsAdminUser, BasePermission
from rest_framework.pagination import PageNumberPagination
from django.urls import reverse
from commercialoperator.components.main.models import Region, District, Tenure, ApplicationType, ActivityMatrix, AccessType, Park, Trail, ActivityCategory, Activity, RequiredDocument, Question, GlobalSettings
from commercialoperator.components.main.serializers import RegionSerializer, DistrictSerializer, TenureSerializer, ApplicationTypeSerializer, ActivityMatrixSerializer, AccessTypeSerializer, ParkSerializer, ParkFilterSerializer, TrailSerializer, ActivitySerializer, ActivityCategorySerializer, RequiredDocumentSerializer, QuestionSerializer, GlobalSettingsSerializer, OracleSerializer, BookingSettlementReportSerializer, LandActivityTabSerializer, MarineActivityTabSerializer, EventsParkSerializer, TrailTabSerializer, FilmingParkSerializer
from django.core.exceptions import ValidationError
from django.db.models import Q
from commercialoperator.components.proposals.models import Proposal
from commercialoperator.components.proposals.serializers import ProposalSerializer
from commercialoperator.components.bookings.utils import oracle_integration
from commercialoperator.components.bookings import reports
from ledger.checkout.utils import create_basket_session, create_checkout_session, place_order_submission, get_cookie_basket
from collections import namedtuple
import json
from decimal import Decimal
import logging
logger = logging.getLogger('payment_checkout')
class DistrictViewSet(viewsets.ReadOnlyModelViewSet):
queryset = District.objects.all().order_by('id')
serializer_class = DistrictSerializer
@detail_route(methods=['GET',])
def land_parks(self, request, *args, **kwargs):
instance = self.get_object()
qs = instance.land_parks
qs = qs.order_by('id')
serializer = ParkSerializer(qs,context={'request':request}, many=True)
return Response(serializer.data)
@detail_route(methods=['GET',])
def parks(self, request, *args, **kwargs):
instance = self.get_object()
qs = instance.parks
qs = qs.order_by('id')
serializer = ParkSerializer(qs,context={'request':request}, many=True)
return Response(serializer.data)
class RegionViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Region.objects.all().order_by('id')
serializer_class = RegionSerializer
class ActivityMatrixViewSet(viewsets.ReadOnlyModelViewSet):
#queryset = ActivityMatrix.objects.all().order_by('id')
queryset = ActivityMatrix.objects.none()
serializer_class = ActivityMatrixSerializer
def get_queryset(self):
user = self.request.user
if user.is_authenticated():
return [ActivityMatrix.objects.filter(name='Commercial Operator').order_by('-version').first()]
return ActivityMatrix.objects.none()
class TenureViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Tenure.objects.all().order_by('order')
serializer_class = TenureSerializer
class ApplicationTypeViewSet(viewsets.ReadOnlyModelViewSet):
#queryset = ApplicationType.objects.all().order_by('order')
queryset = ApplicationType.objects.none()
serializer_class = ApplicationTypeSerializer
def get_queryset(self):
return ApplicationType.objects.order_by('order').filter(visible=True)
class AccessTypeViewSet(viewsets.ReadOnlyModelViewSet):
queryset = AccessType.objects.all().order_by('id')
serializer_class = AccessTypeSerializer
class ParkFilterViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Park.objects.all().order_by('id')
serializer_class = ParkFilterSerializer
class GlobalSettingsViewSet(viewsets.ReadOnlyModelViewSet):
queryset = GlobalSettings.objects.all().order_by('id')
serializer_class = GlobalSettingsSerializer
class LandActivityTabViewSet(viewsets.ReadOnlyModelViewSet):
"""
A simple ViewSet for listing the various serialized viewsets in a single container
"""
def list(self, request):
#Container = namedtuple('ActivityLandTab', ('access_types', 'activity_types', 'regions'))
trails_allowed_activities_id=Trail.objects.all().order_by('allowed_activities').values_list('allowed_activities', flat=True).distinct()
trail_activity_types=Activity.objects.filter(id__in=trails_allowed_activities_id)
Container = namedtuple('ActivityLandTab', ('access_types', 'land_activity_types', 'trail_activity_types', 'marine_activity_types', 'trails', 'marine_activities', 'land_required_documents', 'regions'))
container = Container(
access_types=AccessType.objects.all().order_by('id'),
land_activity_types=Activity.objects.filter(activity_category__activity_type='land').order_by('id'),
trail_activity_types=trail_activity_types,
marine_activity_types=Activity.objects.filter(activity_category__activity_type='marine').order_by('id'),
trails=Trail.objects.all().order_by('id'),
marine_activities=ActivityCategory.objects.filter(activity_type='marine').order_by('id'),
land_required_documents=RequiredDocument.objects.filter().order_by('id'),
regions=Region.objects.all().order_by('id'),
)
#print(container)
serializer = LandActivityTabSerializer(container)
return Response(serializer.data)
class MarineActivityTabViewSet(viewsets.ReadOnlyModelViewSet):
"""
A simple ViewSet for listing the various serialized viewsets in a single container
"""
def list(self, request):
#Container = namedtuple('ActivityLandTab', ('access_types', 'activity_types', 'regions'))
Container = namedtuple('ActivityMarineTab', ('marine_activities', 'marine_parks', 'required_documents'))
container = Container(
#marine_activity_types=Activity.objects.filter(activity_category__activity_type='marine').order_by('id'),
marine_activities=ActivityCategory.objects.filter(activity_type='marine').order_by('id'),
#marine_parks=ActivityCategory.objects.filter(activity_type='marine').order_by('id'),
marine_parks=Park.objects.filter(park_type='marine').order_by('id'),
required_documents=RequiredDocument.objects.filter().order_by('id'),
)
serializer = MarineActivityTabSerializer(container)
return Response(serializer.data)
class ParkViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Park.objects.all().order_by('id')
serializer_class = ParkSerializer
@list_route(methods=['GET',])
def filter_list(self, request, *args, **kwargs):
serializer = ParkFilterSerializer(self.get_queryset(),context={'request':request}, many=True)
return Response(serializer.data)
@list_route(methods=['GET',])
def events_parks_list(self, request, *args, **kwargs):
serializer = EventsParkSerializer(self.get_queryset(),context={'request':request}, many=True)
return Response(serializer.data)
@list_route(methods=['GET',])
def filming_parks_list(self, request, *args, **kwargs):
serializer = FilmingParkSerializer(self.get_queryset(),context={'request':request}, many=True)
return Response(serializer.data)
@list_route(methods=['GET',])
def marine_parks(self, request, *args, **kwargs):
qs = self.get_queryset().filter(park_type='marine')
serializer = ParkSerializer(qs,context={'request':request}, many=True)
return Response(serializer.data)
@list_route(methods=['GET',])
def land_parks(self, request, *args, **kwargs):
qs = self.get_queryset().filter(park_type='land')
serializer = ParkSerializer(qs,context={'request':request}, many=True)
return Response(serializer.data)
@detail_route(methods=['GET',])
def allowed_activities(self, request, *args, **kwargs):
instance = self.get_object()
qs = instance.allowed_activities.all()
serializer = ActivitySerializer(qs,context={'request':request}, many=True)
#serializer = ActivitySerializer(qs)
return Response(serializer.data)
@detail_route(methods=['GET',])
def allowed_access(self, request, *args, **kwargs):
instance = self.get_object()
qs = instance.allowed_access.all()
serializer = AccessTypeSerializer(qs,context={'request':request}, many=True)
#serializer = ActivitySerializer(qs)
return Response(serializer.data)
class TrailViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Trail.objects.all().order_by('id')
serializer_class = TrailSerializer
@detail_route(methods=['GET',])
def allowed_activities(self, request, *args, **kwargs):
instance = self.get_object()
qs = instance.allowed_activities.all()
serializer = ActivitySerializer(qs,context={'request':request}, many=True)
#serializer = ActivitySerializer(qs)
return Response(serializer.data)
class LandActivitiesViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Activity.objects.none()
serializer_class = ActivitySerializer
def get_queryset(self):
categories=ActivityCategory.objects.filter(activity_type='land')
activities=Activity.objects.filter(Q(activity_category__in = categories)& Q(visible=True))
return activities
class MarineActivitiesViewSet(viewsets.ReadOnlyModelViewSet):
queryset = ActivityCategory.objects.none()
serializer_class = ActivityCategorySerializer
def get_queryset(self):
categories=ActivityCategory.objects.filter(activity_type='marine')
return categories
class RequiredDocumentViewSet(viewsets.ReadOnlyModelViewSet):
queryset = RequiredDocument.objects.all()
serializer_class = RequiredDocumentSerializer
# def get_queryset(self):
# categories=ActivityCategory.objects.filter(activity_type='marine')
# return categories
class QuestionViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Question.objects.all()
serializer_class = QuestionSerializer
@list_route(methods=['GET',])
def tclass_questions_list(self, request, *args, **kwargs):
qs=Question.objects.filter(application_type__name=ApplicationType.TCLASS)
serializer = QuestionSerializer(qs,context={'request':request}, many=True)
return Response(serializer.data)
@list_route(methods=['GET',])
def events_questions_list(self, request, *args, **kwargs):
qs=Question.objects.filter(application_type__name=ApplicationType.EVENT)
serializer = QuestionSerializer(qs,context={'request':request}, many=True)
return Response(serializer.data)
class PaymentViewSet(viewsets.ModelViewSet):
#queryset = Proposal.objects.all()
queryset = Proposal.objects.none()
#serializer_class = ProposalSerializer
serializer_class = ProposalSerializer
lookup_field = 'id'
def create(self, request, *args, **kwargs):
response = super(PaymentViewSet, self).create(request, *args, **kwargs)
# here may be placed additional operations for
# extracting id of the object and using reverse()
fallback_url = request.build_absolute_uri('/')
return HttpResponseRedirect(redirect_to=fallback_url + '/success/')
@detail_route(methods=['POST',])
@renderer_classes((JSONRenderer,))
def park_payment(self, request, *args, **kwargs):
try:
with transaction.atomic():
#instance = self.get_object()
proposal = Proposal.objects.get(id=kwargs['id'])
lines = self.create_lines(request)
response = self.checkout(request, proposal, lines, invoice_text='Some invoice text')
return response
#data = [dict(key='My Response')]
#return Response(data)
#return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
class BookingSettlementReportView(views.APIView):
renderer_classes = (JSONRenderer,)
def get(self,request,format=None):
try:
http_status = status.HTTP_200_OK
#parse and validate data
report = None
data = {
"date":request.GET.get('date'),
}
serializer = BookingSettlementReportSerializer(data=data)
serializer.is_valid(raise_exception=True)
filename = 'Booking Settlement Report-{}'.format(str(serializer.validated_data['date']))
# Generate Report
report = reports.booking_bpoint_settlement_report(serializer.validated_data['date'])
if report:
response = HttpResponse(FileWrapper(report), content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(filename)
return response
else:
raise serializers.ValidationError('No report was generated.')
except serializers.ValidationError:
raise
except Exception as e:
traceback.print_exc()
#class BookingReportView(views.APIView):
# renderer_classes = (JSONRenderer,)
#
# def get(self,request,format=None):
# try:
# http_status = status.HTTP_200_OK
# #parse and validate data
# report = None
# data = {
# "date":request.GET.get('date'),
# }
# serializer = BookingSettlementReportSerializer(data=data)
# serializer.is_valid(raise_exception=True)
# filename = 'Booking Report-{}'.format(str(serializer.validated_data['date']))
# # Generate Report
# report = reports.bookings_report(serializer.validated_data['date'])
# if report:
# response = HttpResponse(FileWrapper(report), content_type='text/csv')
# response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(filename)
# return response
# else:
# raise serializers.ValidationError('No report was generated.')
# except serializers.ValidationError:
# raise
# except Exception as e:
# traceback.print_exc()
class OracleJob(views.APIView):
renderer_classes = [JSONRenderer,]
def get(self, request, format=None):
try:
data = {
"date":request.GET.get("date"),
"override": request.GET.get("override")
}
serializer = OracleSerializer(data=data)
serializer.is_valid(raise_exception=True)
oracle_integration(serializer.validated_data['date'].strftime('%Y-%m-%d'),serializer.validated_data['override'])
data = {'successful':True}
return Response(data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
raise serializers.ValidationError(repr(e.error_dict)) if hasattr(e, 'error_dict') else serializers.ValidationError(e)
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
#To display only trails and activity types on Event activity tab
class TrailTabViewSet(viewsets.ReadOnlyModelViewSet):
"""
A simple ViewSet for listing the various serialized viewsets in a single container
"""
def list(self, request):
#Container = namedtuple('ActivityLandTab', ('access_types', 'activity_types', 'regions'))
Container = namedtuple('TrailTab', ('land_activity_types', 'trails',))
container = Container(
land_activity_types=Activity.objects.filter(activity_category__activity_type='land').order_by('id'),
trails=Trail.objects.all().order_by('id'),
)
serializer = TrailTabSerializer(container)
return Response(serializer.data)
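# Illustrative router wiring sketch (an assumption for context; the project's actual URL
# registration is defined elsewhere):
# from rest_framework import routers
# router = routers.DefaultRouter()
# router.register(r'parks', ParkViewSet)
# router.register(r'trails', TrailViewSet)
# urlpatterns = router.urls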
|
the-stack_0_20401 | import cv2
import numpy as np
import os
import sys
import time
from matplotlib import pyplot as plt
from IPython import display
screen_x_eff, screen_y_eff = 1125,1958
jumper_foot_offset = 20
holdDt = 1.392
tap_x, tap_y = 600,1000
#load jumper template
jumper_template = cv2.imread('jumper.png')
template_h,template_w = jumper_template.shape[0:2]
def jump(distance):
dt = int(holdDt * distance)
rand_tapxy = np.random.randn(4,)*3 # add some randomness to the tap location
cmd_msg = 'adb shell input swipe {x1} {y1} {x2} {y2} {dt}'.format(
x1=tap_x+int(rand_tapxy[0]),y1=tap_y+int(rand_tapxy[1]),
x2=tap_x+int(rand_tapxy[2]),y2=tap_y+int(rand_tapxy[3]),
dt=dt)
os.system(cmd_msg)
return dt
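# Worked example of the duration mapping above: for a measured jump distance of 500 px the press
# time is int(1.392 * 500) = 696 ms, before the small random offset is added to the tap position.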
def find_position(im_name):
img = cv2.imread(im_name);
res = cv2.matchTemplate(img,jumper_template,cv2.TM_SQDIFF_NORMED) #find jumper template matching
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
print(min_val)
if min_val>0.3: #fail to find a match
return -1,-1,img
top_left = min_loc
bottom_right = (top_left[0] + template_w, top_left[1] + template_h)
jumper_xy = (top_left[0]+int(template_w*0.5), top_left[1]+template_h-jumper_foot_offset) #jumper base location
target_xy = (screen_x_eff-jumper_xy[0],screen_y_eff-jumper_xy[1]) #mirror the jumper base location to get the target base location
distance = np.sqrt(np.square(target_xy[0]-jumper_xy[0])+np.square(target_xy[1]-jumper_xy[1])) #compute jump distance
#print(target_xy,distance)
cv2.rectangle(img,top_left, bottom_right, 255, 2) # highlight where the jumper template is found
cv2.circle(img,jumper_xy, 10, 255, 2) # highlight jumper base location
cv2.circle(img,target_xy, 10, 255, 2) # highlight target base location
#print(jumper_xy,target_xy,distance)
return target_xy,distance,img
while True:
os.system('adb shell screencap /sdcard/1.png') # take a screenshot
os.system('adb pull /sdcard/1.png ./scrshot.png') # download the screenshot to local disk
target_xy, distance, img = find_position('scrshot.png')
plt.clf()
fig=plt.figure(figsize=(18, 16))
plt.subplot(111)
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.xticks([]), plt.yticks([])
plt.show()
display.display(plt.gcf())
display.clear_output(wait=True)
if distance<0: #fail to find match
print('failed')
break
jump(distance)
time.sleep(2)
|
the-stack_0_20402 | from typing import TYPE_CHECKING
from sqlalchemy.orm import relationship
from sqlalchemy.sql.expression import cast
from sqlalchemy.sql.schema import Column, ForeignKey
from sqlalchemy.sql.sqltypes import BOOLEAN, INTEGER
from app.apis.v1.entities.models import Entity
from app.database import BaseModel
if TYPE_CHECKING:
from ._Role import Role
class RoleEntityPermission(BaseModel):
__tablename__ = "role_entity_permissions"
entity_id = Column(
INTEGER,
ForeignKey("entities.id"),
nullable=False,
comment="entity's table foreign key",
)
entity = relationship("Entity")
role_id = Column(
INTEGER,
ForeignKey("roles.id"),
nullable=False,
comment="role's table foreign key",
)
can_create = Column(
BOOLEAN,
nullable=False,
server_default=cast(False, BOOLEAN),
comment="can create flag",
)
can_edit = Column(
BOOLEAN,
nullable=False,
server_default=cast(False, BOOLEAN),
comment="can edit flag",
)
def __init__(
self,
entity: "Entity",
role: "Role",
can_create: bool = False,
can_edit: bool = False,
) -> None:
self.entity_id = entity.id
self.role_id = role.id
self.can_create = can_create
self.can_edit = can_edit
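# Illustrative usage sketch (assumes an active SQLAlchemy session plus existing Entity and Role
# rows; not part of the original model definition):
# permission = RoleEntityPermission(entity=entity, role=role, can_create=True, can_edit=False)
# session.add(permission)
# session.commit()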
|
the-stack_0_20404 | import numpy as np
def get_confusion_matrix_one_hot(model_results, truth):
'''
model_results and truth should be for one-hot format, i.e, have >= 2 columns,
where truth is 0/1, and max along each row of model_results is model result
'''
assert model_results.shape == truth.shape
num_outputs = truth.shape[1]
confusion_matrix = np.zeros((num_outputs, num_outputs), dtype=np.int32)
predictions = np.argmax(model_results, axis=1)
assert len(predictions) == truth.shape[0]
for actual_class in range(num_outputs):
idx_examples_this_class = truth[:, actual_class] == 1
prediction_for_this_class = predictions[idx_examples_this_class]
for predicted_class in range(num_outputs):
count = np.sum(prediction_for_this_class == predicted_class)
confusion_matrix[actual_class, predicted_class] = count
assert np.sum(confusion_matrix) == len(truth)
assert np.sum(confusion_matrix) == np.sum(truth)
return confusion_matrix
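# Tiny worked example (illustrative values):
# truth = np.array([[1, 0], [0, 1], [0, 1]])
# model_results = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
# get_confusion_matrix_one_hot(model_results, truth)
# -> [[1, 0],
# [1, 1]] (row = actual class, column = predicted class)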
|
the-stack_0_20408 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
import logging
from ._config import get_config, set_config, config_context
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module=r'^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.20.0'
try:
# This variable is injected in the __builtins__ by the build
# process. It is used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of scikit-learn during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
from .utils._show_versions import show_versions
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'datasets', 'decomposition', 'dummy', 'ensemble', 'exceptions',
'externals', 'feature_extraction', 'feature_selection',
'gaussian_process', 'isotonic', 'kernel_approximation',
'kernel_ridge', 'linear_model', 'manifold', 'metrics',
'mixture', 'model_selection', 'multiclass', 'multioutput',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis', 'impute', 'compose',
# Non-modules:
'clone', 'get_config', 'set_config', 'config_context',
'show_versions']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
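# Hedged usage sketch of the configuration helpers exposed by this package (illustrative only,
# not part of the upstream file):
# import sklearn
# with sklearn.config_context(assume_finite=True):
#     pass  # finiteness validation is skipped for estimators used inside this block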
|
the-stack_0_20410 | import random
import sys, getopt
from GenericAdaptiveBloomFilter import GenericAdaptiveBloomFilter
from DataSet import DataSet
from LogNull import LogNull
from LogFile import LogFile
from LogScreen import LogScreen
from GenericHashFunctionsSHA512 import GenericHashFunctionsSHA512
from GenericHashFunctionsSHA512All import GenericHashFunctionsSHA512All
# Main method
def main(argv):
# Default values for
# Number of words in the filter
blocks = 1024
# bit width per word
width = 64
# Number of hash functions to set a bit in the word
k=3
# Number of groups of functions that can be changed when a false
# positive is detected
groups=2
# Elements stored in the filter will be factor*blocks
factor=8
# Default name for the file with all the traces in order
traces = './traces/chicago.dirA/equinix-chicago.dirA.20140619-130900_noleadingspace.txt'
# Default name for the folder of the traces and storable elements
folder = './traces/chicago.dirA/'
# Hash function to be used (md5 by default)
hash_f = 'md5'
# How many false positives are required to swap between groups of functions
swap=1
# Retrieve the option values from command line
try:
opts, args = getopt.getopt(sys.argv[1:],"hb:w:k:g:f:t:d:a:s:")
except getopt.GetoptError:
print ('usage: %s -b <words> -w <width> -k <bits> -g <function_groups> -f <factor> -t <filetraces> -d <folder> -a <hash> -s <false_to_swap>' % sys.argv[0])
sys.exit(2)
for opt, arg in opts:
# Help option. Print help and leave.
if opt == '-h':
print ('usage: %s -b <words> -w <width> -k <bits> -g <function_groups> -f <factor> -t <filetraces> -d <folder> -a <hash> -s <false_to_swap>' % sys.argv[0])
sys.exit()
# -b option for setting the number of words in the filter
elif opt == "-b":
blocks=int(arg)
# -w option to set the bit width within each word
elif opt == "-w":
width=int(arg)
# -k option to set the number of hash elements to select the bits to be set
elif opt == "-k":
k=int(arg)
# -g options to set the number of groups of hash functions to swap
# among them when false positives are found
elif opt == "-g":
groups=int(arg)
# -f option to set the factor (factor x words will be stored)
elif opt == "-f":
factor=int(arg)
# -t option to define the traces file to be used
elif opt == "-t":
traces = arg
# -d option to set the directory where the traces and storable element files
# are to be located
elif opt == "-d":
folder = arg
# -a option to change the default md5 hash to other ("sha512" supported)
elif opt == "-a":
hash_f = arg
# -s option to change the number of false positives required to swap
# between groups of functions.
elif opt == "-s":
swap = int(arg)
# Pass the parameters to the run function
run(traces, folder, blocks, width, k, groups, factor, hash_f, swap)
return
# Run the actual experiment using the parameters received
def run (traces, folder, blocks=1024, width=64, k=3, groups=2, factor=8, hash_f='md5', swap=1):
# Number of times to execute each experiment to get an average
# There must exist as many files with the elements to be stored
# as iterations.
totalIterations=10
# Definition of the name of the output files.
logOutput = 'result_b%s_w%s_k%s_g%s_f%s' % (blocks, width, k, groups, factor)
logOutput2 = 'resultmin_b%s_w%s_k%s_g%s_f%s' % (blocks, width, k, groups, factor)
# LogNull does not print, LogFile prints to file and LogScreen to the default output
# Change the objects depending on which one you want to use
log = LogNull()
log2 = LogFile(folder+logOutput, "w")
sc = LogScreen()
# Message explaining the file to be read for the traces
info ="Traces file=%s" % (traces)
sc.write(info)
log.write(info+"\n")
log2.write(info+"\n")
# Message printing the parameters used for the experiment
info ="Initializing parameters blocks=%d, width=%d, k=%d, groups=%d, factor=%d, hash_f=%s, swap=%s" % (blocks, width, k, groups, factor, hash_f, swap)
sc.write(info)
log.write(info+"\n")
log2.write(info+"\n")
# False positive rate accumulation element
fpr = 0
# Run the iterations and get the average
for i in range(1,totalIterations+1):
# The file name should be similar to "/directory/shuf8N_1024B_1.txt"
shuf_file = "%sshuf%sN_%sB_%s.txt" % (folder, factor, blocks, i)
# Data set that keeps the actual elements that were added to the filter
# to perform false positive check
ds = DataSet()
# AdaptiveBloomFilter file
abf = None
# Build the filter passing a SHA512 hash function
if hash_f == 'sha512':
sha = GenericHashFunctionsSHA512(words=blocks, bits=width, nhash=k, hash_groups=groups)
abf = GenericAdaptiveBloomFilter(words=blocks, bits=width, nhash=k, hash_groups=groups, hash_f=sha)
elif hash_f == 'sha512b':
sha = GenericHashFunctionsSHA512All(words=blocks, bits=width, nhash=k, hash_groups=groups)
abf = GenericAdaptiveBloomFilter(words=blocks, bits=width, nhash=k, hash_groups=groups, hash_f=sha)
# Otherwise build it using the default MD5 hash
else:
abf = GenericAdaptiveBloomFilter(words=blocks, bits=width, nhash=k, hash_groups=groups)
# False positives initialized to zero
fp=0
# True positives initialized to zero
tp=0
# True negatives initialized to zero
tn=0
# factor * blocks elements are to be stored
maxin=factor*blocks
# Print the file name with the storable elements that is going to be used
sc.write(shuf_file)
# Open the file
dataToStore = open(shuf_file, 'r')
# Initializing the number of elements stored to zero
stored=0
# Keep storing until factor*blocks is reached or the file ends
while True:
if stored>=maxin:
break
entry = dataToStore.readline()
if not entry:
break
stored+=1
# Store into the Bloom filter
abf.add(entry)
# Store in the slow memory for all the groups of functions
abf.addslow(entry)
# Store the actual value to check for false positives
ds.add(entry)
# Close the file
dataToStore.close()
# Message to verify if we stored the expected number of elements
sc.write("length stored: %s" % ds.length())
# Open the file with the traces
caida = open(folder+traces, 'r')
# Process all elements
while True:
# Read next element
element = caida.readline()
if not element:
break
# By default, consider it a true negative
tn+=1
# If there is a match in the filter
if abf.check(element):
# If it is not an element that was stored
if not ds.test(element):
# Then it is a false positive
fp+=1
# No longer considered true negative
tn-=1
# Swap between functions will ocurr every "swap" false
# positives found. Use module operator to detect
if fp%swap == 0:
abf.swaphash(element)
# It was found and it was actually stored
else:
# It is a true positive
tp+=1
# No longer considered true negative
tn-=1
# Close the file with the traces
caida.close()
# Accumulate the False positive rate. It will be divided by the number of iterations
fpr += fp/(fp+tn)
# Print the result of the iteration
info = "Iteration %s. FP=%d, TP=%d, TN=%d, FPR=%s." % (i, fp, tp, tn, fp/(fp+tn))
sc.write(info)
log.write(info+"\n")
log2.write(info+"\n")
# Print the final result
info = "FPR for %sx%s. FPR %s." % (factor, blocks, round(fpr/totalIterations,6))
sc.write(info)
log.write(info+"\n")
log2.write(info+"\n")
main("")
|
the-stack_0_20411 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class Volume:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'status': 'str',
'size': 'int',
'availability_zone': 'str',
'attachments': 'list[Attachment]',
'name': 'str',
'description': 'str',
'volume_type': 'str',
'bootable': 'str',
'encrypted': 'bool',
'multiattach': 'bool',
'metadata': 'dict(str, str)'
}
attribute_map = {
'id': 'id',
'status': 'status',
'size': 'size',
'availability_zone': 'availability_zone',
'attachments': 'attachments',
'name': 'name',
'description': 'description',
'volume_type': 'volume_type',
'bootable': 'bootable',
'encrypted': 'encrypted',
'multiattach': 'multiattach',
'metadata': 'metadata'
}
def __init__(self, id=None, status=None, size=None, availability_zone=None, attachments=None, name=None, description=None, volume_type=None, bootable=None, encrypted=None, multiattach=None, metadata=None):
"""Volume - a model defined in huaweicloud sdk"""
self._id = None
self._status = None
self._size = None
self._availability_zone = None
self._attachments = None
self._name = None
self._description = None
self._volume_type = None
self._bootable = None
self._encrypted = None
self._multiattach = None
self._metadata = None
self.discriminator = None
if id is not None:
self.id = id
if status is not None:
self.status = status
if size is not None:
self.size = size
if availability_zone is not None:
self.availability_zone = availability_zone
if attachments is not None:
self.attachments = attachments
if name is not None:
self.name = name
if description is not None:
self.description = description
if volume_type is not None:
self.volume_type = volume_type
if bootable is not None:
self.bootable = bootable
if encrypted is not None:
self.encrypted = encrypted
self.multiattach = multiattach
if metadata is not None:
self.metadata = metadata
@property
def id(self):
"""Gets the id of this Volume.
Disk ID.
:return: The id of this Volume.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Volume.
Disk ID.
:param id: The id of this Volume.
:type: str
"""
self._id = id
@property
def status(self):
"""Gets the status of this Volume.
Disk status.
:return: The status of this Volume.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Volume.
Disk status.
:param status: The status of this Volume.
:type: str
"""
self._status = status
@property
def size(self):
"""Gets the size of this Volume.
Disk size.
:return: The size of this Volume.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this Volume.
Disk size.
:param size: The size of this Volume.
:type: int
"""
self._size = size
@property
def availability_zone(self):
"""Gets the availability_zone of this Volume.
Availability zone (AZ) to which the disk belongs.
:return: The availability_zone of this Volume.
:rtype: str
"""
return self._availability_zone
@availability_zone.setter
def availability_zone(self, availability_zone):
"""Sets the availability_zone of this Volume.
Availability zone (AZ) to which the disk belongs.
:param availability_zone: The availability_zone of this Volume.
:type: str
"""
self._availability_zone = availability_zone
@property
def attachments(self):
"""Gets the attachments of this Volume.
Attachment information of the disk.
:return: The attachments of this Volume.
:rtype: list[Attachment]
"""
return self._attachments
@attachments.setter
def attachments(self, attachments):
"""Sets the attachments of this Volume.
Attachment information of the disk.
:param attachments: The attachments of this Volume.
:type: list[Attachment]
"""
self._attachments = attachments
@property
def name(self):
"""Gets the name of this Volume.
Disk name.
:return: The name of this Volume.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Volume.
Disk name.
:param name: The name of this Volume.
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this Volume.
Description.
:return: The description of this Volume.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Volume.
Description.
:param description: The description of this Volume.
:type: str
"""
self._description = description
@property
def volume_type(self):
"""Gets the volume_type of this Volume.
Disk type.
:return: The volume_type of this Volume.
:rtype: str
"""
return self._volume_type
@volume_type.setter
def volume_type(self, volume_type):
"""Sets the volume_type of this Volume.
Disk type.
:param volume_type: The volume_type of this Volume.
:type: str
"""
self._volume_type = volume_type
@property
def bootable(self):
"""Gets the bootable of this Volume.
Indicates whether the volume is bootable.
:return: The bootable of this Volume.
:rtype: str
"""
return self._bootable
@bootable.setter
def bootable(self, bootable):
"""Sets the bootable of this Volume.
Indicates whether the volume is bootable.
:param bootable: The bootable of this Volume.
:type: str
"""
self._bootable = bootable
@property
def encrypted(self):
"""Gets the encrypted of this Volume.
Indicates whether the volume is encrypted.
:return: The encrypted of this Volume.
:rtype: bool
"""
return self._encrypted
@encrypted.setter
def encrypted(self, encrypted):
"""Sets the encrypted of this Volume.
Indicates whether the volume is encrypted.
:param encrypted: The encrypted of this Volume.
:type: bool
"""
self._encrypted = encrypted
@property
def multiattach(self):
"""Gets the multiattach of this Volume.
Indicates whether the disk is multi-attach.
:return: The multiattach of this Volume.
:rtype: bool
"""
return self._multiattach
@multiattach.setter
def multiattach(self, multiattach):
"""Sets the multiattach of this Volume.
Indicates whether the disk is multi-attach.
:param multiattach: The multiattach of this Volume.
:type: bool
"""
self._multiattach = multiattach
@property
def metadata(self):
"""Gets the metadata of this Volume.
Metadata of the disk.
:return: The metadata of this Volume.
:rtype: dict(str, str)
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this Volume.
Metadata of the disk.
:param metadata: The metadata of this Volume.
:type: dict(str, str)
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Volume):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
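# Illustrative usage sketch (field values are made up for demonstration):
# volume = Volume(id="vol-0001", size=40, volume_type="SSD", multiattach=False)
# print(volume.to_dict())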
|
the-stack_0_20413 | """
Copyright (C) 2012 Alan J Lockett
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
from .basic import PopulationDistribution, GaussianProposal
from pyec.distribution.bayes.mutators import StructureMutator
from pyec.distribution.bayes.structure.proposal import StructureProposal
from pyec.distribution.ec.mutators import Gaussian, Bernoulli
from pyec.config import Config
from pyec.history import DoubleMarkovHistory
_ = Config
class SimulatedAnnealingAcceptance(PopulationDistribution):
"""A selector that implements the acceptance probability for simulated
annealing. Computes the acceptance ratio and samples it. If the population
size is greater than one, then this distribution maintains an array
of accepted values, and can be used to run multiple concurrent
markov chains.
Config parameters
* schedule -- The cooling schedule to use. May be any callable function, or
a string, one of ("log", "linear", "discount"). If it is a
callable, then it will be passed the ``updates`` value in
:class:`History` and should return a floating point value
that will divide the exponent in the Boltzmann distribution.
That is, ``schedule(n)`` should converge to zero as n goes to
infinity.
* learningRate -- A divisor for the cooling schedule, used for built-in
schedules "log" and "linear". As a divisor, it divides the
temperature but multiplies the exponent.
* temp0 -- An initial temperature for the temperature decay in "discount"
* restart -- A probability of restarting, tested at each update
* divisor -- A divisor that divides the ``updates`` property of
:class:`History`, scaling the rate of decline in the temperature
* discount -- The decay factor for the built-in discount schedule; ``temp0``
is multiplied by ``discount`` once each time the ``update``
method is called
"""
config = Config(schedule="log",
learningRate = 1.0,
temp0 = 1.0,
restart = 0.0,
divisor = 100.0,
discount = .99,
populationSize = 1,
history = DoubleMarkovHistory)
def compatible(self, history):
return (hasattr(history, 'lastPopulation')
and hasattr(history, 'penultimate')
and hasattr(history, 'reportAcceptance'))
def batch(self, popSize):
temp = self.temperature()
last = self.history.lastPopulation()
penultimate = self.history.penultimate()
if penultimate is None:
return [x for x,s in last]
scoreProposed = np.array([s for x,s in last])
scoreAccepted = np.array([s for x,s in penultimate])
exponent = (scoreProposed - scoreAccepted) / temp
if self.config.minimize:
exponent = -exponent
probs = np.minimum(1.0, np.exp(exponent))
selection = np.random.binomial(1, probs, np.shape(probs))
accepted = 0.0
result = []
for i,sel in enumerate(selection):
if sel > 0.5:
result.append(last[i][0])
accepted += 1.0
else:
result.append(penultimate[i][0])
self.history.reportAcceptance(accepted / popSize)
return result
def temperature(self):
n = 1 + float(self.history.updates) / self.config.divisor
if hasattr(self.config.schedule, '__call__'):
return self.config.schedule(n)
elif self.config.schedule == "linear":
return 1. / (n * self.config.learningRate)
elif self.config.schedule == "log":
return 1. / (np.log(n) * self.config.learningRate)
elif self.config.schedule == "discount":
return 1. / (self.config.temp0 * (self.config.discount ** n))
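# Worked example of the acceptance rule in batch() above (maximization case): at temperature 1.0,
# a proposal scoring 0.4 against a previously accepted score of 0.5 is kept with probability
# min(1, exp((0.4 - 0.5) / 1.0)) ≈ 0.905.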
# Euclidean space
RealSimulatedAnnealing = (
SimulatedAnnealingAcceptance << GaussianProposal[_(sd=.005)]
#GaussianProposal[_(sd=.005)] << SimulatedAnnealingAcceptance
)
# fixed-length bit strings
BinarySimulatedAnnealing = (
SimulatedAnnealingAcceptance << Bernoulli[_(p=.01)]
#Bernoulli[_(p=.01)] << SimulatedAnnealingAcceptance
)
# Structure search in a Bayes net, use a
# pyec.distribution.bayes.space.BayesNetStructure space for searching.
BayesNetSimulatedAnnealing = (
SimulatedAnnealingAcceptance[_(schedule="linear",
divisor=100.)] <<
StructureMutator[_(branchFactor=5)]
)[_(minimize=False)]
|
the-stack_0_20414 | # -*- coding: utf-8 -*-
from django.apps import apps
from threading import Thread
from django.conf import settings
from djangoplus.utils.metadata import get_metadata, get_scope, get_can_execute, count_parameters_names
CACHE = dict(
INITIALIZED=False,
SETTINGS_INSTANCE=None,
# USER INTERFACE
VIEWS=[],
WIDGETS=[],
SUBSET_WIDGETS=[],
MODEL_WIDGETS={},
CARD_PANEL_MODELS=[],
ICON_PANEL_MODELS=[],
LIST_DASHBOARD=[],
SUBSETS=dict(),
MANAGER_METHODS=dict(),
INSTANCE_METHODS=dict(),
SIMPLE_MODELS=[],
# ROLES
ROLE_MODELS=dict(),
ABSTRACT_ROLE_MODELS=dict(),
ABSTRACT_ROLE_MODEL_NAMES=dict(),
# ACTIONS
INSTANCE_ACTIONS=dict(),
QUERYSET_ACTIONS=dict(),
CLASS_ACTIONS=dict(),
CLASS_VIEW_ACTIONS=dict(),
FIELDSET_ACTIONS=dict(),
# DOCUMENTATION
WORKFLOWS=dict(),
CLASS_DIAGRAMS=dict(),
COMPOSITION_FIELDS=dict(),
COMPOSITION_RELATIONS=dict(),
# ACCESS SCOPE
ORGANIZATION_MODEL=None,
UNIT_MODEL=None,
SIGNUP_MODEL=None,
PERMISSIONS_BY_SCOPE=dict(),
# FORMATTERS
FORMATTERS=dict(),
# DOCUMENTATION
LAST_AUTHENTICATED_ROLE=None,
LAST_AUTHENTICATED_USERNAME=None,
# API
API_MODELS=[]
)
if not CACHE['INITIALIZED']:
CACHE['INITIALIZED'] = True
for model in apps.get_models():
model_name = model.__name__.lower()
app_label = get_metadata(model, 'app_label')
add_shortcut = get_metadata(model, 'add_shortcut')
list_shortcut = get_metadata(model, 'list_shortcut')
list_diplay = get_metadata(model, 'list_display')
verbose_name = get_metadata(model, 'verbose_name')
verbose_name_plural = get_metadata(model, 'verbose_name_plural')
menu = get_metadata(model, 'menu')
list_menu = get_metadata(model, 'list_menu')
dashboard = get_metadata(model, 'dashboard')
expose = get_metadata(model, 'expose')
role_signup = get_metadata(model, 'role_signup', False)
field_names = []
for field in get_metadata(model, 'get_fields'):
field_names.append(field.name)
if hasattr(field, 'composition') and field.composition:
CACHE['COMPOSITION_FIELDS'][model] = field.name
if field.remote_field.model not in CACHE['COMPOSITION_RELATIONS']:
CACHE['COMPOSITION_RELATIONS'][field.remote_field.model] = []
if model not in CACHE['COMPOSITION_RELATIONS'][field.remote_field.model]:
CACHE['COMPOSITION_RELATIONS'][field.remote_field.model].append(model)
if model not in CACHE['SUBSETS']:
CACHE['SUBSETS'][model] = []
if model not in CACHE['INSTANCE_ACTIONS']:
CACHE['INSTANCE_ACTIONS'][model] = dict()
if model not in CACHE['QUERYSET_ACTIONS']:
CACHE['QUERYSET_ACTIONS'][model] = dict()
if model not in CACHE['CLASS_ACTIONS']:
CACHE['CLASS_ACTIONS'][model] = dict()
if model not in CACHE['FIELDSET_ACTIONS']:
CACHE['FIELDSET_ACTIONS'][model] = dict()
if role_signup:
CACHE['SIGNUP_MODEL'] = model
# indexing organization model
if hasattr(model, 'organization_ptr_id'):
CACHE['ORGANIZATION_MODEL'] = model
# indexing unit model
if hasattr(model, 'unit_ptr_id'):
CACHE['UNIT_MODEL'] = model
if expose:
CACHE['API_MODELS'].append(model)
# indexing shortcuts
if add_shortcut:
CACHE['ICON_PANEL_MODELS'].append((model, add_shortcut))
if list_shortcut:
CACHE['CARD_PANEL_MODELS'].append((model, list_shortcut))
if dashboard:
CACHE['LIST_DASHBOARD'].append(model)
# indexing the views generated from model classes
url = '/list/{}/{}/'.format(app_label, model_name)
icon = None
if menu and model not in CACHE['COMPOSITION_FIELDS']:
menu_groups = ()
if list_menu and list_menu is not True:
if type(list_menu) != tuple:
list_menu = list_menu,
menu_groups = list_menu
if type(menu) == tuple:
description, icon = menu
else:
description, icon = menu, get_metadata(model, 'icon')
permission = '{}.list_{}'.format(app_label, model_name)
item = dict(
url=url, can_view=permission, menu=description, icon=icon, add_shortcut=False, groups=menu_groups
)
CACHE['VIEWS'].append(item)
if get_metadata(model, 'proxy'):
continue
# indexing the @subset and @meta methods defined in the manager classes
for attr_name in dir(model.objects.get_queryset()):
attr = getattr(model.objects.get_queryset(), attr_name)
if hasattr(attr, '_metadata'):
metadata_type = get_metadata(attr, 'type')
if metadata_type == 'subset':
subset_title = get_metadata(attr, 'verbose_name')
subset_name = get_metadata(attr, 'name')
subset_help_text = get_metadata(attr, 'help_text')
subset_alert = get_metadata(attr, 'alert')
subset_notify = get_metadata(attr, 'notify')
subset_can_view = get_metadata(attr, 'can_view')
subset_order = get_metadata(attr, 'order')
subset_menu = get_metadata(attr, 'menu')
subset_template = get_metadata(attr, 'template')
subset_expose = get_metadata(attr, 'expose')
subset_dashboard = get_metadata(attr, 'dashboard')
subset_list_display = get_metadata(attr, 'list_display')
subset_list_filter = get_metadata(attr, 'list_filter')
subset_search_fields = get_metadata(attr, 'search_fields')
subset_workflow = get_metadata(attr, 'usecase')
subset_url = '{}{}/'.format(url, attr.__func__.__name__)
item = dict(
verbose_name=subset_title, name=attr_name, function=attr, url=subset_url, can_view=subset_can_view,
menu=subset_menu, icon=icon, alert=subset_alert, notify=subset_notify,
order=subset_order, help_text=subset_help_text, list_display=subset_list_display,
list_filter=subset_list_filter, search_fields=subset_search_fields, expose=subset_expose,
template=subset_template
)
CACHE['SUBSETS'][model].append(item)
if subset_dashboard:
widget = dict(
verbose_name=subset_title, model=model, function=attr_name, can_view=subset_can_view,
dashboard=subset_dashboard, formatter=None, link=True, list_display=subset_list_display,
list_filter=subset_list_filter, search_fields=subset_search_fields, template=subset_template
)
CACHE['SUBSET_WIDGETS'].append(widget)
if subset_workflow:
role = subset_can_view and subset_can_view[0] or 'Superusuário'
if attr_name == 'all':
activity_description = 'Listar {}'.format(verbose_name_plural,)
else:
activity_description = 'Listar {}: {}'.format(verbose_name_plural, subset_title)
CACHE['WORKFLOWS'][subset_workflow] = dict(activity=activity_description, role=role, model=None)
# @meta
else:
added = False
for tmp in CACHE['SUBSET_WIDGETS']:
if tmp['function'] == attr_name:
added = True
break
if added:
continue
widget_verbose_name = get_metadata(attr, 'verbose_name')
widget_can_view = get_metadata(attr, 'can_view')
widget_dashboard = get_metadata(attr, 'dashboard')
widget_formatter = get_metadata(attr, 'formatter')
widget_shortcut = get_metadata(attr, 'shortcut')
widget_icon = get_metadata(attr, 'icon')
widget_url = '{}{}/'.format(url, attr.__func__.__name__)
widget = dict(
verbose_name=widget_verbose_name, model=model, function=attr_name, can_view=widget_can_view,
dashboard=widget_dashboard, formatter=widget_formatter, link=False, icon=widget_icon,
shortcut=widget_shortcut, url=widget_url
)
CACHE['SUBSET_WIDGETS'].append(widget)
if model not in CACHE['MANAGER_METHODS']:
CACHE['MANAGER_METHODS'][model] = list()
CACHE['MANAGER_METHODS'][model].append(widget)
# indexing the actions refered in fieldsets
if hasattr(model, 'fieldsets'):
for title, info in model.fieldsets:
if title not in CACHE['FIELDSET_ACTIONS'][model]:
CACHE['FIELDSET_ACTIONS'][model][title] = []
for action_name in info.get('actions', []):
CACHE['FIELDSET_ACTIONS'][model][title].append(action_name)
else:
CACHE['SIMPLE_MODELS'].append(model)
# indexing the actions defined in models
for attr_name in dir(model):
if attr_name[0] != '_' and attr_name not in field_names:
func = getattr(model, attr_name)
if hasattr(func, '_action'):
action = getattr(func, '_action')
action_group = action['group']
action_can_execute = get_can_execute(action)
action_verbose_name = action['verbose_name']
action_workflow = action['usecase']
action_menu = action['menu']
view_name = action['view_name']
if action_group not in CACHE['INSTANCE_ACTIONS'][model]:
CACHE['INSTANCE_ACTIONS'][model][action_group] = dict()
CACHE['INSTANCE_ACTIONS'][model][action_group][view_name] = action
if action_workflow:
role = action_can_execute and action_can_execute[0] or 'Superusuário'
CACHE['WORKFLOWS'][action_workflow] = dict(activity=action_verbose_name, role=role, model=verbose_name)
if action_menu:
url = '/action/{}/{}/{}/'.format(
get_metadata(model, 'app_label'), model.__name__.lower(), attr_name
)
action_view = dict(
verbose_name=action_verbose_name, function=None, url=url, can_view=action_can_execute, menu=action_menu,
icon=None, style='ajax', add_shortcut=False, doc=func.__doc__, usecase=None
)
CACHE['VIEWS'].append(action_view)
if hasattr(func, '_metadata'):
widget_verbose_name = get_metadata(func, 'verbose_name')
widget_can_view = get_metadata(func, 'can_view')
widget_dashboard = get_metadata(func, 'dashboard')
widget_formatter = get_metadata(func, 'formatter')
widget_icon = get_metadata(func, 'icon')
widget = dict(
verbose_name=widget_verbose_name, model=model, function=attr_name, can_view=widget_can_view,
dashboard=widget_dashboard, formatter=widget_formatter, link=False, icon=widget_icon
)
if model not in CACHE['MODEL_WIDGETS']:
CACHE['MODEL_WIDGETS'][model] = []
CACHE['MODEL_WIDGETS'][model].append(widget)
if model not in CACHE['INSTANCE_METHODS']:
CACHE['INSTANCE_METHODS'][model] = list()
CACHE['INSTANCE_METHODS'][model].append(widget)
# indexing the actions related to relations whose model has the add_inline meta-attribute
inlines = []
if hasattr(model, 'fieldsets'):
for fieldset in model.fieldsets:
if 'relations' in fieldset[1]:
for item in fieldset[1]['relations']:
if ':' in item:
# 'relation_name:all[action_a,action_b],subset[action_c]'
relation_name = item.split(':')[0]
elif '[' in item:
# 'relation_name[action_a,action_b]'
relation_name = item.split('[')[0]
else:
# 'relation_name'
relation_name = item
# indexing the actions defined in managers
qs_manager_class = type(model.objects.get_queryset())
for attr_name in dir(qs_manager_class):
if not attr_name[0] == '_':
attr = getattr(qs_manager_class, attr_name)
if hasattr(attr, '_action'):
action = getattr(attr, '_action')
action_verbose_name = action['verbose_name']
action_can_execute = get_can_execute(action)
action_group = action['group']
action_name = action['view_name']
action_subsets = action['subsets']
action_workflow = action['usecase']
is_class_method = isinstance(qs_manager_class.__dict__[attr_name], classmethod)
if not action_subsets:
action['inline'] = True
if is_class_method:
if action_group not in CACHE['CLASS_ACTIONS'][model]:
CACHE['CLASS_ACTIONS'][model][action_group] = dict()
CACHE['CLASS_ACTIONS'][model][action_group][action_name] = action
else:
if action_group not in CACHE['QUERYSET_ACTIONS'][model]:
CACHE['QUERYSET_ACTIONS'][model][action_group] = dict()
CACHE['QUERYSET_ACTIONS'][model][action_group][action_name] = action
if action_workflow:
role = action_can_execute and action_can_execute[0] or 'Superusuário'
CACHE['WORKFLOWS'][action_workflow] = dict(activity=action_verbose_name, role=role, model=verbose_name)
# indexing the formatters
for app_label in settings.INSTALLED_APPS:
try:
module_name = '{}.formatters'.format(app_label)
module = __import__(module_name, fromlist=list(map(str, app_label.split('.'))))
for attr_name in dir(module):
module_attr = getattr(module, attr_name)
if hasattr(module_attr, '_formatter'):
CACHE['FORMATTERS'][getattr(module_attr, '_formatter') or attr_name] = module_attr
except ImportError as e:
pass
from djangoplus.ui.components import Component
for cls in Component.subclasses():
formatter_name = cls.formatter_name or cls.__name__.lower()
if formatter_name not in CACHE['FORMATTERS']:
CACHE['FORMATTERS'][formatter_name] = cls
# indexing the actions, views and widgets in views module
for app_label in settings.INSTALLED_APPS:
try:
module = __import__('{}.views'.format(app_label), fromlist=list(map(str, app_label.split('.'))))
for attr_name in dir(module):
func = getattr(module, attr_name)
# indexing the actions defined in the views
if hasattr(func, '_action'):
action = getattr(func, '_action')
action_group = action['group']
action_model = action['model']
action_function = action['function']
action_name = action['view_name']
action_verbose_name = action['verbose_name']
action_workflow = action['usecase']
action_can_execute = get_can_execute(action)
action_subsets = action['subsets']
action_menu = action['menu']
if action_workflow:
role = action_can_execute and action_can_execute[0] or 'Superusuário'
action_model_verbose_name = get_metadata(action_model, 'verbose_name')
CACHE['WORKFLOWS'][action_workflow] = dict(activity=action_verbose_name, role=role, model=action_model_verbose_name)
# instance action
if count_parameters_names(action_function) > 1:
if action_group not in CACHE['INSTANCE_ACTIONS'][action_model]:
CACHE['INSTANCE_ACTIONS'][action_model][action_group] = dict()
CACHE['INSTANCE_ACTIONS'][action_model][action_group][action_name] = action
# class action
else:
if not action_subsets:
action['inline'] = True
if action_model not in CACHE['CLASS_VIEW_ACTIONS']:
CACHE['CLASS_VIEW_ACTIONS'][action_model] = dict()
if action_group not in CACHE['CLASS_VIEW_ACTIONS'][action_model]:
CACHE['CLASS_VIEW_ACTIONS'][action_model][action_group] = dict()
CACHE['CLASS_VIEW_ACTIONS'][action_model][action_group][action_name] = action
# indexing the views
elif hasattr(func, '_view'):
action = getattr(func, '_view')
CACHE['VIEWS'].append(action)
view_title = action['verbose_name']
view_workflow = action['usecase']
view_can_view = action['can_view']
if view_workflow:
role = view_can_view and view_can_view[0] or 'Superusuário'
CACHE['WORKFLOWS'][view_workflow] = dict(activity=view_title, role=role, model=None)
# indexing the widgets
elif hasattr(func, '_widget'):
CACHE['WIDGETS'].append(getattr(func, '_widget'))
except ImportError as e:
pass
for model in apps.get_models():
app_label = get_metadata(model, 'app_label')
verbose_name = get_metadata(model, 'verbose_name')
role_username = get_metadata(model, 'role_username')
role_signup = get_metadata(model, 'role_signup')
add_label = get_metadata(model, 'add_label', None)
workflow = get_metadata(model, 'usecase', 0)
diagram_classes = get_metadata(model, 'class_diagram', None)
# indexing role models
if role_username:
CACHE['ROLE_MODELS'][model] = dict(
username_field=role_username, scope=get_scope(
model, CACHE['ORGANIZATION_MODEL'], CACHE['UNIT_MODEL']), name=verbose_name
)
for subclass in model.__subclasses__():
subclass_role_username = get_metadata(subclass, 'role_username')
if subclass_role_username:
subclass_verbose_name = get_metadata(subclass, 'verbose_name')
CACHE['ROLE_MODELS'][subclass] = dict(username_field=subclass_role_username, scope=get_scope(
subclass, CACHE['ORGANIZATION_MODEL'], CACHE['UNIT_MODEL']), name=subclass_verbose_name)
if model not in CACHE['ABSTRACT_ROLE_MODELS']:
CACHE['ABSTRACT_ROLE_MODELS'][model] = []
CACHE['ABSTRACT_ROLE_MODEL_NAMES'][verbose_name] = []
CACHE['ABSTRACT_ROLE_MODELS'][model].append(subclass)
CACHE['ABSTRACT_ROLE_MODEL_NAMES'][verbose_name].append(subclass_verbose_name)
permission_by_scope = dict()
for scope in ('role', 'unit', 'organization'):
for permission_name in ('edit', 'add', 'delete', 'view', 'list'):
permission_key = '{}_by_{}'.format(permission_name, scope)
for group_name in get_metadata(model, 'can_{}'.format(permission_key), (), iterable=True):
if permission_name == 'list':
permission_key = 'view_by_{}'.format(scope)
if permission_key not in permission_by_scope:
permission_by_scope[permission_key] = []
if group_name in CACHE['ABSTRACT_ROLE_MODEL_NAMES']:
for concrete_group_name in CACHE['ABSTRACT_ROLE_MODEL_NAMES'][group_name]:
permission_by_scope[permission_key].append(concrete_group_name)
else:
permission_by_scope[permission_key].append(group_name)
for group_name in get_metadata(model, 'can_admin_by_{}'.format(scope), (), iterable=True):
for permission_name in ('edit', 'add', 'delete', 'view', 'list'):
if permission_name == 'list':
permission_key = 'view_by_{}'.format(scope)
else:
permission_key = '{}_by_{}'.format(permission_name, scope)
if permission_key not in permission_by_scope:
permission_by_scope[permission_key] = []
if group_name not in permission_by_scope[permission_key]:
if group_name in CACHE['ABSTRACT_ROLE_MODEL_NAMES']:
for concrete_group_name in CACHE['ABSTRACT_ROLE_MODEL_NAMES'][group_name]:
permission_by_scope[permission_key].append(concrete_group_name)
else:
permission_by_scope[permission_key].append(group_name)
for permission_name in ('edit', 'add', 'delete', 'view', 'list'):
permission_key = permission_name
for group_name in get_metadata(model, 'can_{}'.format(permission_name), (), iterable=True):
if permission_name == 'list':
permission_key = 'view'
if permission_key not in permission_by_scope:
permission_by_scope[permission_key] = []
if group_name not in permission_by_scope[permission_key]:
permission_by_scope[permission_key].append(group_name)
for group_name in get_metadata(model, 'can_admin', (), iterable=True):
for permission_name in ('edit', 'add', 'delete', 'view', 'list'):
permission_key = permission_name
if permission_name == 'list':
permission_key = 'view'
if permission_key not in permission_by_scope:
permission_by_scope[permission_key] = []
if group_name not in permission_by_scope[permission_key]:
permission_by_scope[permission_key].append(group_name)
for actions_dict in (CACHE['INSTANCE_ACTIONS'], CACHE['QUERYSET_ACTIONS']):
for category in actions_dict[model]:
for key in list(actions_dict[model][category].keys()):
name = actions_dict[model][category][key]['verbose_name']
view_name = actions_dict[model][category][key]['view_name']
can_execute = []
for scope in ('', 'role', 'unit', 'organization'):
scope = scope and '_by_{}'.format(scope) or scope
for group_name in actions_dict[model][category][key].get('can_execute{}'.format(scope)) or ():
permission_key = '{}{}'.format(view_name, scope)
if permission_key not in permission_by_scope:
permission_by_scope[permission_key] = []
permission_by_scope[permission_key].append(group_name)
if permission_by_scope:
CACHE['PERMISSIONS_BY_SCOPE'][model] = permission_by_scope
if workflow:
role = permission_by_scope.get('add_by_role') and permission_by_scope.get('add_by_role')[0] or None
if not role:
role = permission_by_scope.get('add_by_unit') and permission_by_scope.get('add_by_unit')[0] or None
if not role:
role = permission_by_scope.get('add_by_organization') and permission_by_scope.get('add_by_organization')[0] or None
if not role:
role = permission_by_scope.get('add') and permission_by_scope.get('add')[0] or None
if not role or role == verbose_name:
role = 'Superusuário'
if model in CACHE['COMPOSITION_FIELDS']:
related_model = getattr(model, CACHE['COMPOSITION_FIELDS'][model]).field.remote_field.model
related_verbose_name = get_metadata(related_model, 'verbose_name')
related_add_label = get_metadata(model, 'add_label')
if related_add_label:
related_add_label = related_add_label.replace(' em ', ' __ ')
activity = '{} em {}'.format(related_add_label, related_verbose_name)
else:
verbose_name = verbose_name.replace(' em ', ' __ ')
activity = 'Adicionar {} em {}'.format(verbose_name, related_verbose_name)
CACHE['WORKFLOWS'][workflow] = dict(activity=activity, role=role, model=None)
else:
if add_label:
activity = add_label
else:
if role_signup:
activity = '{} {}'.format('Cadastrar-se como', verbose_name)
role = verbose_name
else:
activity = '{} {}'.format('Cadastrar', verbose_name)
CACHE['WORKFLOWS'][workflow] = dict(activity=activity, role=role, model=None)
if diagram_classes is not None:
CACHE['CLASS_DIAGRAMS'][verbose_name] = [model]
if type(diagram_classes) == bool and diagram_classes:
for field in model._meta.get_fields():
if field.remote_field and field.remote_field.model:
if field.remote_field.model not in CACHE['CLASS_DIAGRAMS'][verbose_name]:
CACHE['CLASS_DIAGRAMS'][verbose_name].append(field.remote_field.model)
else:
for model_name in diagram_classes:
try:
extra_model = apps.get_model(app_label, model_name)
except LookupError:
for extra_model in apps.get_models():
if extra_model.__name__.lower() == model_name:
break
if extra_model not in CACHE['CLASS_DIAGRAMS'][verbose_name]:
CACHE['CLASS_DIAGRAMS'][verbose_name].append(extra_model)
keys = list(CACHE['WORKFLOWS'].keys())
keys.sort()
l = []
for key in keys:
l.append(CACHE['WORKFLOWS'][key])
CACHE['WORKFLOWS'] = l
if settings.DROPBOX_TOKEN and settings.DEBUG:
def sync_storage():
from djangoplus.utils.storage.dropbox import DropboxStorage
DropboxStorage().sync()
Thread(target=sync_storage).start()
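# Illustrative sketch (not part of the original module): after the block above,
# CACHE['WORKFLOWS'] has been converted into a sorted list of dicts with
# 'activity', 'role' and 'model' keys; a report could consume it like this.
def _example_workflow_summary(cache=CACHE):
    return ['{} - {}'.format(entry['role'], entry['activity']) for entry in cache['WORKFLOWS']]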
|
the-stack_0_20415 | """
Cross-validation training extensions.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
import os
from pylearn2.train import SerializationGuard
from pylearn2.train_extensions.best_params import MonitorBasedSaveBest
from pylearn2.utils import serial
class TrainCVExtension(object):
"""
TrainCV extension class. This class operates on the Train objects
corresponding to each fold of cross-validation, and therefore does not
implement an on_monitor method.
"""
def setup(self, trainers):
"""
Set up training extension.
Parameters
----------
trainers : list
List of Train objects belonging to the parent TrainCV object.
"""
def on_save(self, trainers):
"""
Called by TrainCV just before saving models.
Parameters
----------
trainers : list
List of Train objects belonging to the parent TrainCV object.
"""
class MonitorBasedSaveBestCV(TrainCVExtension):
"""
Save best model for each cross-validation fold. Based on
train_extensions.best_params.MonitorBasedSaveBest.
Parameters
----------
channel_name : str
Channel to monitor.
save_path : str or None, optional
Output filename. If None (the default), store_best_model must be
true.
store_best_model : bool, optional
Whether to store the best model in memory. If False (the default),
save_path must be defined. Note that the best model from each child
trainer must be accessed through the extensions for that trainer.
higher_is_better : bool, optional
Whether a higher channel value indicates a better model.
tag_key : str, optional
Unique key to associate with the best model. If provided, this key
will be modified to have a unique value for each child model.
save_folds : bool
Whether to write individual files for each cross-validation fold.
Only used if save_path is not None.
"""
def __init__(self, channel_name, save_path=None, store_best_model=False,
higher_is_better=False, tag_key=None, save_folds=False):
self.channel_name = channel_name
assert save_path is not None or store_best_model, (
"Either save_path must be defined or store_best_model must be " +
"True. (Or both.)")
self.save_path = save_path
self.store_best_model = store_best_model
self.higher_is_better = higher_is_better
self.best_cost = np.inf
self.best_model = None
self.tag_key = tag_key
self.save_folds = save_folds
def setup(self, trainers):
"""
Add tracking to all trainers.
Parameters
----------
trainers : list
List of Train objects belonging to the parent TrainCV object.
"""
for k, trainer in enumerate(trainers):
if self.save_path is not None and self.save_folds:
path, ext = os.path.splitext(self.save_path)
save_path = path + '-{}'.format(k) + ext
else:
save_path = None
if self.tag_key is not None:
tag_key = '{}-{}'.format(self.tag_key, k)
else:
tag_key = None
extension = MonitorBasedSaveBest(
self.channel_name, save_path=save_path, store_best_model=True,
higher_is_better=self.higher_is_better, tag_key=tag_key)
trainer.extensions.append(extension)
def on_save(self, trainers):
"""
Save best model from each cross-validation fold.
Parameters
----------
trainers : list
List of Train objects belonging to the parent TrainCV object.
"""
if self.save_path is None:
return
models = []
for trainer in trainers:
for extension in trainer.extensions:
if isinstance(extension, MonitorBasedSaveBest):
models.append(extension.best_model)
break
assert len(models) == len(trainers)
try:
for trainer in trainers:
trainer.dataset._serialization_guard = SerializationGuard()
serial.save(self.save_path, models, on_overwrite='backup')
finally:
for trainer in trainers:
trainer.dataset._serialization_guard = None
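# Illustrative sketch (not part of pylearn2): the per-fold filename scheme used by
# MonitorBasedSaveBestCV.setup() above, reproduced in isolation. The helper name
# is a hypothetical example.
def _example_fold_save_paths(save_path, n_folds):
    path, ext = os.path.splitext(save_path)
    return [path + '-{}'.format(k) + ext for k in range(n_folds)]
# e.g. _example_fold_save_paths('best.pkl', 3) -> ['best-0.pkl', 'best-1.pkl', 'best-2.pkl']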
|
the-stack_0_20417 | """\
Code generator functions for wxNotebook objects
@copyright: 2002-2007 Alberto Griggio
@copyright: 2014-2016 Carsten Grohmann
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import common
import wcodegen
class PythonNotebookGenerator(wcodegen.PythonWidgetCodeWriter):
def get_code(self, window):
self._reset_vars()
wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, window)
prop = window.properties
id_name, id = self.codegen.generate_code_id(window)
layout_props = []
for label, tab_win in zip(window.tabs, window.pages):
label = label[0]
if tab_win.klass == "sizerslot":
tab_win = "wx.Panel(self.%s)"%window.name
else:
tab_win = 'self.%s'%tab_win.name
layout_props.append('self.%s.AddPage(%s, %s)\n'%(window.name, tab_win, self.codegen.quote_str(label)))
parent = self.format_widget_access(window.parent)
if window.is_toplevel:
l = []
if id_name:
l.append(id_name)
l.append('self.%s = %s(%s, %s)\n' % (window.name, self.codegen.get_class(window.klass), parent, id))
return l, [], []
klass = window.klass
if self.codegen.preview:
klass = 'wxNotebook'
init = []
if id_name:
init.append(id_name)
init.append(('self.%s = ' + self.cn(klass) + '(%s, %s%s)\n')%(window.name, parent, id, self.tmpl_dict['style']))
props_buf = self.codegen.generate_common_properties(window)
return init, props_buf, layout_props
def get_properties_code(self, obj):
prop = obj.properties
props_buf = []
for label, tab_win in zip(obj.tabs, obj.pages):
label = label[0]
props_buf.append( 'self.AddPage(self.%s, %s)\n' % (tab_win.name, self.codegen.quote_str(label)) )
props_buf.extend(self.codegen.generate_common_properties(obj))
return props_buf
def xrc_code_generator(obj):
xrcgen = common.code_writers['XRC']
from xml.sax.saxutils import escape
class NotebookXrcObject(xrcgen.DefaultXrcObject):
def write(self, output, ntabs, properties=None):
if properties is None: properties = {}
# the "tabs" property contains the pages of a notebook
# be careful: tabs in context of code generation are white spaces used for indenting lines!!
properties["tabs"] = None # don't write
# always use a wxNotebookSizer
properties['usenotebooksizer'] = '1'
properties['no_custom_class'] = None
xrcgen.DefaultXrcObject.write(self, output, ntabs, properties)
def write_child_prologue(self, child, output, ntabs):
if self.widget.pages:
label = self.widget.tabs[child.widget.pos-1][0] # pos is 1-based
tab_s = ' ' * ntabs
output.append( tab_s + '<object class="notebookpage">\n' )
output.append( tab_s + '<label>%s</label>\n' % escape(label) )
def write_child_epilogue(self, child, output, ntabs):
if self.widget.pages:
output.append( ' '*ntabs + '</object>\n' )
return NotebookXrcObject(obj)
class CppNotebookGenerator(wcodegen.CppWidgetCodeWriter):
constructor = [('wxWindow*', 'parent'), ('wxWindowID', 'id'),
('const wxPoint&', 'pos', 'wxDefaultPosition'),
('const wxSize&', 'size', 'wxDefaultSize'),
('long', 'style', '0')]
import_modules = ['<wx/notebook.h>']
def get_code(self, window):
"generates the C++ code for wxNotebook"
self._reset_vars()
wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, window)
prop = window.properties
id_name, id = self.codegen.generate_code_id(window)
if id_name:
ids = [id_name]
else:
ids = []
layout_props = []
for label, tab_win in zip(window.tabs, window.pages):
label = label[0]
layout_props.append('%s->AddPage(%s, %s);\n' % (window.name, tab_win.name, self.codegen.quote_str(label)))
if not window.parent.is_toplevel:
parent = '%s' % window.parent.name
else:
parent = 'this'
if window.is_toplevel:
l = ['%s = new %s(%s, %s);\n' % (window.name, window.klass, parent, id)]
return l, ids, [], []
init = ['%s = new %s(%s, %s%s);\n' % (window.name, window.klass, parent, id, self.tmpl_dict['style'])]
props_buf = self.codegen.generate_common_properties(window)
return init, ids, props_buf, layout_props
def get_properties_code(self, obj):
prop = obj.properties
props_buf = []
for label, tab_win in zip(obj.tabs, obj.pages):
label = label[0]
props_buf.append( 'AddPage(%s, %s);\n' % (tab_win.name, self.codegen.quote_str(label)) )
props_buf.extend(self.codegen.generate_common_properties(obj))
return props_buf
def initialize():
klass = 'wxNotebook'
common.class_names['EditNotebook'] = klass
common.class_names['NotebookPane'] = 'wxPanel'
common.toplevels['EditNotebook'] = 1
common.toplevels['NotebookPane'] = 1
common.register('python', klass, PythonNotebookGenerator(klass) )
common.register('C++', klass, CppNotebookGenerator(klass) )
common.register('XRC', klass, xrc_code_generator )
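# Illustrative sketch (not part of wxGlade): the per-tab layout line that
# PythonNotebookGenerator.get_code() above builds, reproduced in isolation with
# hypothetical example names (codegen.quote_str() is approximated here with %r).
def _example_layout_lines(notebook_name, pages):
    """pages: list of (label, page_widget_name) tuples."""
    return ['self.%s.AddPage(self.%s, %r)\n' % (notebook_name, page, label)
            for label, page in pages]
# e.g. _example_layout_lines('notebook_1', [('Tab 1', 'notebook_1_pane_1')])
# -> ["self.notebook_1.AddPage(self.notebook_1_pane_1, 'Tab 1')\n"]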
|
the-stack_0_20418 | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import errno
import os
import shutil
import zipfile
from argparse import ArgumentParser, Namespace
from collections import defaultdict
from textwrap import dedent
from pex import pex_warnings
from pex.common import chmod_plus_x, pluralize, safe_mkdir
from pex.environment import PEXEnvironment
from pex.pex import PEX
from pex.tools.command import Command, Error, Ok, Result
from pex.tools.commands.virtualenv import PipUnavailableError, Virtualenv
from pex.tracer import TRACER
from pex.typing import TYPE_CHECKING
from pex.venv_bin_path import BinPath
if TYPE_CHECKING:
from typing import Iterable, Iterator, Optional, Tuple
# N.B.: We can't use shutil.copytree since we copy from multiple source locations to the same site
# packages directory destination. Since we're forced to stray from the stdlib here, support for
# hardlinks is added to provide a measurable speed up and disk space savings when possible.
def _copytree(
src, # type: str
dst, # type: str
exclude=(), # type: Tuple[str, ...]
):
# type: (...) -> Iterator[Tuple[str, str]]
safe_mkdir(dst)
link = True
for root, dirs, files in os.walk(src, topdown=True, followlinks=False):
if src == root:
dirs[:] = [d for d in dirs if d not in exclude]
files[:] = [f for f in files if f not in exclude]
for d in dirs:
try:
os.mkdir(os.path.join(dst, os.path.relpath(os.path.join(root, d), src)))
except OSError as e:
if e.errno != errno.EEXIST:
raise e
for f in files:
src_entry = os.path.join(root, f)
dst_entry = os.path.join(dst, os.path.relpath(src_entry, src))
yield src_entry, dst_entry
try:
# We only try to link regular files since linking a symlink on Linux can produce
# another symlink, which leaves open the possibility the src_entry target could
# later go missing leaving the dst_entry dangling.
if link and not os.path.islink(src_entry):
try:
os.link(src_entry, dst_entry)
continue
except OSError as e:
if e.errno != errno.EXDEV:
raise e
link = False
shutil.copy(src_entry, dst_entry)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
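# Illustrative sketch (not part of pex): how populate_venv_with_pex() below consumes
# _copytree's (src, dst) pairs to build the provenance map used for collision
# detection. The helper name is a hypothetical example.
def _example_collisions(src_dirs, dst):
    provenance = defaultdict(list)  # defaultdict is imported at the top of this module
    for src in src_dirs:
        for src_entry, dst_entry in _copytree(src, dst):
            provenance[dst_entry].append(src_entry)
    # Only destinations provided by more than one source are collisions.
    return {d: s for d, s in provenance.items() if len(s) > 1}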
class CollisionError(Exception):
"""Indicates multiple distributions provided the same file when merging a PEX into a venv."""
def populate_venv_with_pex(
venv, # type: Virtualenv
pex, # type: PEX
bin_path=BinPath.FALSE, # type: BinPath.Value
python=None, # type: Optional[str]
collisions_ok=True, # type: bool
):
# type: (...) -> str
venv_python = python or venv.interpreter.binary
venv_bin_dir = os.path.dirname(python) if python else venv.bin_dir
venv_dir = os.path.dirname(venv_bin_dir) if python else venv.venv_dir
# 1. Populate the venv with the PEX contents.
provenance = defaultdict(list)
def record_provenance(src_to_dst):
# type: (Iterable[Tuple[str, str]]) -> None
for src, dst in src_to_dst:
provenance[dst].append(src)
pex_info = pex.pex_info()
if zipfile.is_zipfile(pex.path()):
record_provenance(
PEXEnvironment(pex.path()).explode_code(
venv.site_packages_dir, exclude=("__main__.py", pex_info.PATH)
)
)
else:
record_provenance(
_copytree(
src=pex.path(),
dst=venv.site_packages_dir,
exclude=(pex_info.internal_cache, pex_info.bootstrap, "__main__.py", pex_info.PATH),
)
)
with open(os.path.join(venv.venv_dir, pex_info.PATH), "w") as fp:
fp.write(pex_info.dump())
for dist in pex.resolve():
record_provenance(
_copytree(src=dist.location, dst=venv.site_packages_dir, exclude=("bin",))
)
dist_bin_dir = os.path.join(dist.location, "bin")
if os.path.isdir(dist_bin_dir):
record_provenance(_copytree(dist_bin_dir, venv.bin_dir))
collisions = {dst: srcs for dst, srcs in provenance.items() if len(srcs) > 1}
if collisions:
message_lines = [
"Encountered {collision} building venv at {venv_dir} from {pex}:".format(
collision=pluralize(collisions, "collision"), venv_dir=venv_dir, pex=pex.path()
)
]
for index, (dst, srcs) in enumerate(collisions.items(), start=1):
message_lines.append(
"{index}. {dst} was provided by:\n\t{srcs}".format(
index=index, dst=dst, srcs="\n\t".join(srcs)
)
)
message = "\n".join(message_lines)
if not collisions_ok:
raise CollisionError(message)
pex_warnings.warn(message)
# 2. Add a __main__ to the root of the venv for running the venv dir like a loose PEX dir
# and a main.py for running as a script.
shebang = "#!{} -sE".format(venv_python)
main_contents = dedent(
"""\
{shebang}
if __name__ == "__main__":
import os
import sys
venv_dir = os.path.abspath(os.path.dirname(__file__))
venv_bin_dir = os.path.join(venv_dir, "bin")
shebang_python = {shebang_python!r}
python = os.path.join(venv_bin_dir, os.path.basename(shebang_python))
def iter_valid_venv_pythons():
# Allow for both the known valid venv pythons and their fully resolved venv path
# version in the case their parent directories contain symlinks.
for python_binary in (python, shebang_python):
yield python_binary
yield os.path.join(
os.path.realpath(os.path.dirname(python_binary)),
os.path.basename(python_binary)
)
current_interpreter_blessed_env_var = "_PEX_SHOULD_EXIT_VENV_REEXEC"
if (
not os.environ.pop(current_interpreter_blessed_env_var, None)
and sys.executable not in tuple(iter_valid_venv_pythons())
):
sys.stderr.write("Re-execing from {{}}\\n".format(sys.executable))
os.environ[current_interpreter_blessed_env_var] = "1"
os.execv(python, [python, "-sE"] + sys.argv)
ignored_pex_env_vars = [
"{{}}={{}}".format(name, value)
for name, value in os.environ.items()
if name.startswith(("PEX_", "_PEX_", "__PEX_")) and name not in (
# These are used inside this script.
"_PEX_SHOULD_EXIT_VENV_REEXEC",
"PEX_EXTRA_SYS_PATH",
"PEX_VENV_BIN_PATH",
"PEX_INTERPRETER",
"PEX_SCRIPT",
"PEX_MODULE",
# This is used when loading ENV (Variables()):
"PEX_IGNORE_RCFILES",
# And ENV is used to access these during PEX bootstrap when delegating here via
# a --venv mode PEX file.
"PEX_ROOT",
"PEX_VENV",
"PEX_PATH",
"PEX_PYTHON",
"PEX_PYTHON_PATH",
"PEX_VERBOSE",
"__PEX_EXE__",
"__PEX_UNVENDORED__",
# This is _not_ used (it is ignored), but it's present under CI and simplest to
# add an exception for here and not warn about in CI runs.
"_PEX_TEST_PYENV_ROOT",
)
]
if ignored_pex_env_vars:
sys.stderr.write(
"Ignoring the following environment variables in Pex venv mode:\\n"
"{{}}\\n\\n".format(
os.linesep.join(sorted(ignored_pex_env_vars))
)
)
os.environ["VIRTUAL_ENV"] = venv_dir
sys.path.extend(os.environ.get("PEX_EXTRA_SYS_PATH", "").split(os.pathsep))
bin_path = os.environ.get("PEX_VENV_BIN_PATH", {bin_path!r})
if bin_path != "false":
PATH = os.environ.get("PATH", "").split(os.pathsep)
if bin_path == "prepend":
PATH.insert(0, venv_bin_dir)
elif bin_path == "append":
PATH.append(venv_bin_dir)
else:
sys.stderr.write(
"PEX_VENV_BIN_PATH must be one of 'false', 'prepend' or 'append', given: "
"{{!r}}\\n".format(
bin_path
)
)
sys.exit(1)
os.environ["PATH"] = os.pathsep.join(PATH)
PEX_EXEC_OVERRIDE_KEYS = ("PEX_INTERPRETER", "PEX_SCRIPT", "PEX_MODULE")
pex_overrides = {{
key: os.environ.get(key) for key in PEX_EXEC_OVERRIDE_KEYS if key in os.environ
}}
if len(pex_overrides) > 1:
sys.stderr.write(
"Can only specify one of {{overrides}}; found: {{found}}\\n".format(
overrides=", ".join(PEX_EXEC_OVERRIDE_KEYS),
found=" ".join("{{}}={{}}".format(k, v) for k, v in pex_overrides.items())
)
)
sys.exit(1)
if {strip_pex_env!r}:
for key in list(os.environ):
if key.startswith("PEX_"):
del os.environ[key]
pex_script = pex_overrides.get("PEX_SCRIPT")
if pex_script:
script_path = os.path.join(venv_bin_dir, pex_script)
os.execv(script_path, [script_path] + sys.argv[1:])
pex_interpreter = pex_overrides.get("PEX_INTERPRETER", "").lower() in ("1", "true")
PEX_INTERPRETER_ENTRYPOINT = "code:interact"
entry_point = (
PEX_INTERPRETER_ENTRYPOINT
if pex_interpreter
else pex_overrides.get("PEX_MODULE", {entry_point!r} or PEX_INTERPRETER_ENTRYPOINT)
)
if entry_point == PEX_INTERPRETER_ENTRYPOINT and len(sys.argv) > 1:
args = sys.argv[1:]
arg = args[0]
if arg == "-m":
if len(args) < 2:
sys.stderr.write("Argument expected for the -m option\\n")
sys.exit(2)
entry_point = module = args[1]
sys.argv = args[1:]
# Fall through to entry_point handling below.
else:
filename = arg
sys.argv = args
if arg == "-c":
if len(args) < 2:
sys.stderr.write("Argument expected for the -c option\\n")
sys.exit(2)
filename = "-c <cmd>"
content = args[1]
sys.argv = ["-c"] + args[2:]
elif arg == "-":
content = sys.stdin.read()
else:
with open(arg) as fp:
content = fp.read()
ast = compile(content, filename, "exec", flags=0, dont_inherit=1)
globals_map = globals().copy()
globals_map["__name__"] = "__main__"
globals_map["__file__"] = filename
locals_map = globals_map
{exec_ast}
sys.exit(0)
module_name, _, function = entry_point.partition(":")
if not function:
import runpy
runpy.run_module(module_name, run_name="__main__", alter_sys=True)
else:
import importlib
module = importlib.import_module(module_name)
# N.B.: Functions may be hung off top-level objects in the module namespace,
# e.g.: Class.method; so we drill down through any attributes to the final function
# object.
namespace, func = module, None
for attr in function.split("."):
func = namespace = getattr(namespace, attr)
sys.exit(func())
""".format(
shebang=shebang,
shebang_python=venv_python,
bin_path=bin_path,
strip_pex_env=pex_info.strip_pex_env,
entry_point=pex_info.entry_point,
exec_ast=(
"exec ast in globals_map, locals_map"
if venv.interpreter.version[0] == 2
else "exec(ast, globals_map, locals_map)"
),
)
)
with open(venv.join_path("__main__.py"), "w") as fp:
fp.write(main_contents)
chmod_plus_x(fp.name)
os.symlink(os.path.basename(fp.name), venv.join_path("pex"))
# 3. Re-write any (console) scripts to use the venv Python.
for script in venv.rewrite_scripts(python=venv_python, python_args="-sE"):
TRACER.log("Re-writing {}".format(script))
return shebang
class Venv(Command):
"""Creates a venv from the PEX file."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument(
"venv",
nargs=1,
metavar="PATH",
help="The directory to create the virtual environment in.",
)
parser.add_argument(
"-b",
"--bin-path",
choices=[choice.value for choice in BinPath.values],
default=BinPath.FALSE.value,
help="Add the venv bin dir to the PATH in the __main__.py script.",
)
parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="If the venv directory already exists, overwrite it.",
)
parser.add_argument(
"--collisions-ok",
action="store_true",
default=False,
help=(
"Don't error if population of the venv encounters distributions in the PEX file "
"with colliding files, just emit a warning."
),
)
parser.add_argument(
"-p",
"--pip",
action="store_true",
default=False,
help="Add pip to the venv.",
)
parser.add_argument(
"--copies",
action="store_true",
default=False,
help="Create the venv using copies of system files instead of symlinks",
)
parser.add_argument(
"--compile",
action="store_true",
default=False,
help="Compile all `.py` files in the venv.",
)
def run(
self,
pex, # type: PEX
options, # type: Namespace
):
# type: (...) -> Result
venv_dir = options.venv[0]
venv = Virtualenv.create(
venv_dir, interpreter=pex.interpreter, force=options.force, copies=options.copies
)
populate_venv_with_pex(
venv,
pex,
bin_path=BinPath.for_value(options.bin_path),
collisions_ok=options.collisions_ok,
)
if options.pip:
try:
venv.install_pip()
except PipUnavailableError as e:
return Error(
"The virtual environment was successfully created, but Pip was not "
"installed:\n{}".format(e)
)
if options.compile:
pex.interpreter.execute(["-m", "compileall", venv_dir])
return Ok()
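# Illustrative sketch (an assumption, not part of pex): the Namespace shape that
# Venv.run() above expects, mirroring the arguments registered in add_arguments().
def _example_venv_options(venv_dir):
    return Namespace(venv=[venv_dir], bin_path=BinPath.FALSE.value, force=True,
                     collisions_ok=True, pip=False, copies=False, compile=False)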
|
the-stack_0_20419 | from collections import OrderedDict
import pytest
import torch
import syft
from syft.serde import protobuf
from test.serde.serde_helpers import *
# Dictionary containing test samples functions
samples = OrderedDict()
# Native
samples[type(None)] = make_none
samples[type] = make_type
# PyTorch
samples[torch.device] = make_torch_device
samples[torch.jit.ScriptModule] = make_torch_scriptmodule
samples[torch.jit.ScriptFunction] = make_torch_scriptfunction
samples[torch.jit.TopLevelTracedModule] = make_torch_topleveltracedmodule
samples[torch.nn.Parameter] = make_torch_parameter
samples[torch.Tensor] = make_torch_tensor
samples[torch.Size] = make_torch_size
samples[torch.memory_format] = make_torch_memoryformat
samples[torch.dtype] = make_torch_dtype
# PySyft
samples[
syft.frameworks.torch.tensors.interpreters.additive_shared.AdditiveSharingTensor
] = make_additivesharingtensor
samples[syft.execution.placeholder.PlaceHolder] = make_placeholder
samples[syft.execution.computation.ComputationAction] = make_computation_action
samples[syft.execution.communication.CommunicationAction] = make_communication_action
samples[syft.execution.plan.Plan] = make_plan
samples[syft.execution.protocol.Protocol] = make_protocol
samples[syft.execution.role.Role] = make_role
samples[syft.execution.state.State] = make_state
samples[syft.execution.placeholder_id.PlaceholderId] = make_placeholder_id
samples[syft.execution.plan.NestedTypeWrapper] = make_nested_type_wrapper
samples[syft.generic.pointers.pointer_tensor.PointerTensor] = make_pointertensor
samples[syft.generic.pointers.pointer_dataset.PointerDataset] = make_pointerdataset
samples[syft.generic.string.String] = make_string
# Syft Messages
samples[syft.messaging.message.ObjectMessage] = make_objectmessage
samples[syft.messaging.message.TensorCommandMessage] = make_tensor_command_message
def test_serde_coverage():
"""Checks all types in serde are tested"""
for cls, _ in protobuf.serde.protobuf_global_state.bufferizers.items():
has_sample = cls in samples
assert has_sample, f"Serde for {cls} is not tested"
@pytest.mark.parametrize("cls", samples)
def test_serde_roundtrip_protobuf(cls, workers, hook):
"""Checks that values passed through serialization-deserialization stay same"""
serde_worker = syft.VirtualWorker(id=f"serde-worker-{cls.__name__}", hook=hook, auto_add=False)
original_framework = serde_worker.framework
workers["serde_worker"] = serde_worker
_samples = samples[cls](workers=workers)
for sample in _samples:
_to_protobuf = (
protobuf.serde._bufferize
if not sample.get("forced", False)
else protobuf.serde._force_full_bufferize
)
serde_worker.framework = sample.get("framework", torch)
obj = sample.get("value")
protobuf_obj = _to_protobuf(serde_worker, obj)
roundtrip_obj = None
if not isinstance(obj, Exception):
roundtrip_obj = protobuf.serde._unbufferize(serde_worker, protobuf_obj)
serde_worker.framework = original_framework
if sample.get("cmp_detailed", None):
# Custom detailed objects comparison function.
assert sample.get("cmp_detailed")(roundtrip_obj, obj)
else:
assert type(roundtrip_obj) == type(obj)
assert roundtrip_obj == obj
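# Illustrative sketch (hypothetical, not registered in `samples` above): the
# contract a sample-maker must satisfy for test_serde_roundtrip_protobuf() --
# accept the ``workers`` keyword and return a list of dicts carrying at least a
# "value" entry (and optionally "forced", "framework" or "cmp_detailed").
def make_example_sample(**kwargs):
    return [{"value": torch.tensor([1.0, 2.0, 3.0])}]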
|
the-stack_0_20422 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
List all Windows services installed.
$ python scripts/winservices.py
AeLookupSvc (Application Experience)
status: stopped, start: manual, username: localSystem, pid: None
binpath: C:\Windows\system32\svchost.exe -k netsvcs
ALG (Application Layer Gateway Service)
status: stopped, start: manual, username: NT AUTHORITY\LocalService, pid: None
binpath: C:\Windows\System32\alg.exe
APNMCP (Ask Update Service)
status: running, start: automatic, username: LocalSystem, pid: 1108
binpath: "C:\Program Files (x86)\AskPartnerNetwork\Toolbar\apnmcp.exe"
AppIDSvc (Application Identity)
status: stopped, start: manual, username: NT Authority\LocalService, pid: None
binpath: C:\Windows\system32\svchost.exe -k LocalServiceAndNoImpersonation
Appinfo (Application Information)
status: stopped, start: manual, username: LocalSystem, pid: None
binpath: C:\Windows\system32\svchost.exe -k netsvcs
...
"""
import os
import sys
import psutil
if os.name != 'nt':
sys.exit("platform not supported (Windows only)")
def main():
for service in psutil.win_service_iter():
info = service.as_dict()
print("%r (%r)" % (info['name'], info['display_name']))
print("status: %s, start: %s, username: %s, pid: %s" % (
info['status'], info['start_type'], info['username'], info['pid']))
print("binpath: %s" % info['binpath'])
print("")
if __name__ == '__main__':
sys.exit(main())
|
the-stack_0_20424 | from django.shortcuts import render
from django.http import HttpResponse
from django.core.mail import send_mail, BadHeaderError
from .forms import ContactForm
def homepage(request):
form = ContactForm()
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
subject = f'Message from {form.cleaned_data["name"]} - {form.cleaned_data["subject"]}'
message = form.cleaned_data['message']
sender = form.cleaned_data['email']
recipients = ['[email protected]']
try:
send_mail(subject, message, sender,
recipients, fail_silently=True)
except BadHeaderError:
return HttpResponse('Invalid header found')
return render(request, 'main/success.html', {})
return render(request, 'main/home.html', {'form': form})
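# Illustrative sketch (an assumption): a minimal form compatible with the
# cleaned_data keys used in homepage() above (name, email, subject, message).
# The real ContactForm lives in .forms and may differ from this example.
from django import forms
class ExampleContactForm(forms.Form):
    name = forms.CharField(max_length=100)
    email = forms.EmailField()
    subject = forms.CharField(max_length=150)
    message = forms.CharField(widget=forms.Textarea)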
|
the-stack_0_20430 | import uuid
from django.db import models
from django.utils import timezone
from django.contrib.postgres.fields import ArrayField
from django.db.models import JSONField
from data.utils import get_airtable_media_name, get_airtable_media_content_file
from .practicegroup import PracticeGroup
from .practicetype import PracticeType, PracticeTypeCategory
from .mechanism import Mechanism
from .resource import Resource
from .problem import Problem
from .glyphosateuses import GlyphosateUses
class Practice(models.Model):
"""
This model represents an agricultural practice that we may suggest to
the user.
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
external_id = models.CharField(max_length=100)
modification_date = models.DateTimeField(auto_now=True)
creation_date = models.DateTimeField(default=timezone.now)
image = models.ImageField(null=True)
airtable_json = JSONField(null=True, blank=True)
airtable_url = models.TextField(null=True, blank=True)
title = models.TextField(null=True, blank=True)
short_title = models.TextField()
description = models.TextField(null=True, blank=True)
mechanism = models.ForeignKey(Mechanism, null=True, blank=True, on_delete=models.SET_NULL)
equipment = models.TextField(null=True, blank=True)
schedule = models.TextField(null=True, blank=True)
impact = models.TextField(null=True, blank=True)
additional_benefits = models.TextField(null=True, blank=True)
success_factors = models.TextField(null=True, blank=True)
needs_shallow_tillage = models.BooleanField(blank=True, null=True)
needs_deep_tillage = models.BooleanField(blank=True, null=True)
weed_whitelist_external_ids = ArrayField(models.TextField(), default=list)
pest_whitelist_external_ids = ArrayField(models.TextField(), default=list)
balances_sowing_period = models.BooleanField(blank=True, null=True)
# Practices can have one main resource and several secondary ones
main_resource_label = models.TextField(null=True, blank=True)
main_resource = models.ForeignKey(Resource, null=True, blank=True, on_delete=models.SET_NULL, related_name="main_practices", related_query_name="main_practice",)
secondary_resources = models.ManyToManyField(Resource)
# A practice can be part of practice groups - which are groups that
# refer to the same action but with different levels of specificity.
# We should not propose practices from the same practice group.
practice_groups = models.ManyToManyField(PracticeGroup)
# Whether or not the practice needs tillage (travail du sol)
needs_tillage = models.BooleanField(null=True)
# Whether or not the practice needs livestock
needs_livestock = models.BooleanField(null=True)
# If greater than 1, the practice will be boosted if the user has livestock
# or the possibility to monetize selling animal food. If lower than 1, the
# practice will be penalized if the user has livestock. A value of 1 does not
# modify the value of this practice in presence of livestock
livestock_multiplier = models.DecimalField(null=True, max_digits=7, decimal_places=6)
# If greater than 1, the practice will be boosted if the user has access
# to a direct end-consumer sale. If lower than 1, the practice will be penalized
# if the user has access to end-consumer sale. A value of 1 does not
# modify the value of this practice in presence of end-consumer sale markets.
direct_sale_multiplier = models.DecimalField(null=True, max_digits=7, decimal_places=6)
# The degree at which the practice is precise (0 to 1) - meaning how many decisions must
# the user take on their own in order to implement this practice. A vague practice
# e.g., "Make use of better equipment" will have a low precision, whereas a
# descriptive practice "Make use of a Cataya mechanic seeder combined with a rotative KE
# to place the corn seeds at 12cm apart from each other" will have a high precision value.
precision = models.DecimalField(null=True, max_digits=7, decimal_places=6)
# The degree at which the practice is difficult (0 to 1) - meaning how high is the barrier
# on the user's side in order to implement this practice.
difficulty = models.DecimalField(null=True, max_digits=7, decimal_places=6)
# If this practice adresses particular types of agriculture problem specified in the
# Problem Enum, this field will store these adressed problems.
problems_addressed = ArrayField(models.IntegerField(), blank=True, null=True)
# If this practice corresponds to types available in the PracticeType enum, this field will
# store them.
types = models.ManyToManyField(PracticeType)
# The following fields are multipliers and will boost or handicap the practice depending
# on the value. A value larger than 1 will boost the practice, whereas a value lower than 1
# will handicap it. A value equal to 1 will not make a difference.
# E.g., [{'75': 1.003}, {'69': 0.7329}]
department_multipliers = ArrayField(JSONField(), blank=True, null=True)
# E.g., [{1: 1.003}, {5: 0.7329}]
glyphosate_multipliers = ArrayField(JSONField(), blank=True, null=True)
# E.g., [{'ARGILEUX': 1.0024}, {'LIMONEUX': 0.6362}]
soil_type_multipliers = ArrayField(JSONField(), blank=True, null=True)
# Uses external ID as key
# E.g., [{'recjzIBqwGkton9Ed': 1.0024}, {'recjzIAuvEkton9Ed': 0.6362}]
weed_multipliers = ArrayField(JSONField(), blank=True, null=True)
# Uses external ID as key
# E.g., [{'recjzIBqwGkton9Ed': 1.0024}, {'recjzIAuvEkton9Ed': 0.6362}]
pest_multipliers = ArrayField(JSONField(), blank=True, null=True)
# If this practice involves adding new cultures to the rotation, this field specifies which
# cultures are being added. These are culture external IDs.
added_cultures = ArrayField(models.TextField(), blank=True, null=True)
# If this practice is relevant only for certain types of cultures, they should be specified
# here. If the practice could be applied to any kind of culture this field should remain
# empty. These are culture external IDs.
culture_whitelist = ArrayField(models.TextField(), blank=True, null=True)
# E.g., [{'recjzIAuvEkton9Ed': 1.003}, {'recjzIArvEkton9Ds': 0.7329}]
culture_multipliers = ArrayField(JSONField(), blank=True, null=True)
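    # Illustrative sketch (an assumption, not part of the original model): how a
    # multiplier list such as department_multipliers above could be applied to a
    # base practice score; keys in each single-entry dict are matched as strings.
    @staticmethod
    def _example_apply_multipliers(base_score, multipliers, key):
        score = base_score
        for entry in (multipliers or []):
            if str(key) in entry:
                score *= entry[str(key)]
        return score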
@staticmethod
def create_from_airtable(airtable_json, json_culture_practices, json_departments_practices,
json_departments, json_glyphosate, json_glyphosate_practices, mechanisms,
resources, json_practice_types, json_weeds, json_weed_practices, json_pests, json_pest_practices):
fields = airtable_json['fields']
mechanism_external_ids = fields.get('Marges de manoeuvre', [])
resource_external_ids = fields.get('CTA lien', [])
practice_types = [x for x in json_practice_types if x['id'] in fields.get('Types')]
needs_shallow_tillage = any([x for x in practice_types if x['fields'].get('Enum code') == PracticeTypeCategory.TRAVAIL_DU_SOL.name])
needs_deep_tillage = any([x for x in practice_types if x['fields'].get('Enum code') == PracticeTypeCategory.TRAVAIL_PROFOND.name])
practice = Practice(
external_id=airtable_json.get('id'),
mechanism=next((x for x in mechanisms if x.external_id in mechanism_external_ids), None),
main_resource=next((x for x in resources if x.external_id in resource_external_ids), None),
main_resource_label=fields.get('CTA title'),
airtable_json=airtable_json,
airtable_url='https://airtable.com/tblobpdQDxkzcllWo/' + airtable_json.get('id') + '/',
title=fields.get('Nom').strip(),
short_title=fields.get('Nom court').strip(),
description=fields.get('Description'),
equipment=fields.get('Matériel'),
schedule=fields.get('Période de travail'),
impact=fields.get('Impact'),
additional_benefits=fields.get('Bénéfices supplémentaires'),
success_factors=fields.get('Facteur clé de succès'),
needs_tillage=fields.get('Nécessite travail du sol', False),
livestock_multiplier=fields.get('Élevage multiplicateur'),
needs_livestock=fields.get('Élevage nécessaire', False),
balances_sowing_period=fields.get('Équilibre période semis', False),
direct_sale_multiplier=fields.get('Vente directe multiplicateur'),
precision=fields.get('Précision'),
difficulty=fields.get('Difficulté'),
added_cultures=fields.get('Ajout dans la rotation cultures'),
culture_whitelist=fields.get('Cultures whitelist'),
problems_addressed=Practice._get_problems_addressed(airtable_json),
department_multipliers=Practice._get_department_multipliers(airtable_json, json_departments_practices, json_departments),
glyphosate_multipliers=Practice._get_glyphosate_multipliers(airtable_json, json_glyphosate, json_glyphosate_practices),
culture_multipliers=Practice._get_culture_multipliers(airtable_json, json_culture_practices),
needs_shallow_tillage=needs_shallow_tillage,
needs_deep_tillage=needs_deep_tillage,
weed_multipliers=Practice._get_weed_multipliers(airtable_json, json_weeds, json_weed_practices),
pest_multipliers=Practice._get_pest_multipliers(airtable_json, json_pests, json_pest_practices),
weed_whitelist_external_ids=fields.get('Adventices whitelist', []),
pest_whitelist_external_ids=fields.get('Ravageurs whitelist', []),
)
image_name = get_airtable_media_name(airtable_json, 'Image principale')
image_content_file = get_airtable_media_content_file(airtable_json, 'Image principale')
if image_name and image_content_file:
practice.image.save(image_name, image_content_file, save=True)
return practice
@staticmethod
def _get_problems_addressed(airtable_json):
fields = airtable_json['fields']
airtable_adressed_problems = fields.get('Problèmes adressés', [])
problems = []
for airtable_problem in airtable_adressed_problems:
try:
problems.append(Problem[airtable_problem].value)
except KeyError as _:
continue
return problems
@staticmethod
def _get_department_multipliers(airtable_json, json_departments_practices, json_departments):
departments = json_departments
departments_practices = json_departments_practices
practice = airtable_json
concerned_department_practices = list(filter(lambda x: practice['id'] in (x['fields'].get('Pratique') or []), departments_practices))
department_multipliers = []
for item in concerned_department_practices:
if not item['fields'].get('Departement'):
continue
department_airtable_id = item['fields'].get('Departement')[0]
airtable_department_entry = next(filter(lambda x: x['id'] == department_airtable_id, departments), None)
if not airtable_department_entry or not airtable_department_entry['fields'].get('Numéro'):
continue
department_number = airtable_department_entry['fields'].get('Numéro')
multiplier = item['fields'].get('Multiplicateur') or 1
department_multipliers.append({
department_number: multiplier
})
return department_multipliers
@staticmethod
def _get_glyphosate_multipliers(airtable_json, json_glyphosate, json_glyphosate_practices):
concerned_glyphosate_practices = list(filter(lambda x: airtable_json['id'] in (x['fields'].get('Pratique') or []), json_glyphosate_practices))
glyphosate_multipliers = []
for item in concerned_glyphosate_practices:
if not item['fields'].get('Glyphosate'):
continue
glyphosate_airtable_id = item['fields'].get('Glyphosate')[0]
airtable_glyphosate_entry = next(filter(lambda x: x['id'] == glyphosate_airtable_id, json_glyphosate), None)
if not airtable_glyphosate_entry or not airtable_glyphosate_entry['fields'].get('Enum code'):
continue
try:
glyphosate_enum_number = GlyphosateUses[airtable_glyphosate_entry['fields'].get('Enum code')].value
multiplier = item['fields'].get('Multiplicateur') or 1
glyphosate_multipliers.append({
glyphosate_enum_number: multiplier
})
except KeyError as _:
continue
return glyphosate_multipliers
@staticmethod
def _get_culture_multipliers(airtable_json, json_culture_practices):
concerned_culture_practices = list(filter(lambda x: airtable_json['id'] in (x['fields'].get('Pratique') or []), json_culture_practices))
culture_multipliers = []
for item in concerned_culture_practices:
if not item['fields'].get('Culture'):
continue
culture_airtable_id = item['fields'].get('Culture')[0]
multiplier = item['fields'].get('Multiplicateur') or 1
culture_multipliers.append({
culture_airtable_id: multiplier
})
return culture_multipliers
@staticmethod
def _get_weed_multipliers(airtable_json, json_weeds, json_weed_practices):
concerned_weed_practices = list(filter(lambda x: airtable_json['id'] in (x['fields'].get('Pratique') or []), json_weed_practices))
weed_multipliers = []
for weed_practice in concerned_weed_practices:
if not weed_practice['fields'].get('Adventice') or not weed_practice['fields'].get('Multiplicateur'):
continue
weed_airtable_id = weed_practice['fields'].get('Adventice')[0]
weed = next(filter(lambda x: x['id'] == weed_airtable_id, json_weeds), None)
weed_multipliers.append({
weed['id']: weed_practice['fields'].get('Multiplicateur') or 1
})
return weed_multipliers
@staticmethod
def _get_pest_multipliers(airtable_json, json_pests, json_pest_practices):
concerned_pest_practices = list(filter(lambda x: airtable_json['id'] in (x['fields'].get('Pratique') or []), json_pest_practices))
pest_multipliers = []
for pest_practice in concerned_pest_practices:
if not pest_practice['fields'].get('Ravageur') or not pest_practice['fields'].get('Multiplicateur'):
continue
pest_airtable_id = pest_practice['fields'].get('Ravageur')[0]
pest = next(filter(lambda x: x['id'] == pest_airtable_id, json_pests), None)
if not pest:
continue
pest_multipliers.append({
pest['id']: pest_practice['fields'].get('Multiplicateur') or 1
})
return pest_multipliers |
the-stack_0_20434 | #!/usr/bin/env python
# encoding: UTF-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# SvnWcSub - Subscribe to a SvnPubSub stream, and keep a set of working copy
# paths in sync
#
# Example:
# svnwcsub.py svnwcsub.conf
#
# On startup svnwcsub checks the working copy's path, runs a single svn update
# and then watches for changes to that path.
#
# See svnwcsub.conf for more information on its contents.
#
# TODO:
# - bulk update at startup time to avoid backlog warnings
# - fold BDEC into Daemon
# - fold WorkingCopy._get_match() into __init__
# - remove wc_ready(). assume all WorkingCopy instances are usable.
# place the instances into .watch at creation. the .update_applies()
# just returns if the wc is disabled (eg. could not find wc dir)
# - figure out way to avoid the ASF-specific PRODUCTION_RE_FILTER
# (a base path exclusion list should work for the ASF)
# - add support for SIGHUP to reread the config and reinitialize working copies
# - joes will write documentation for svnpubsub as these items become fulfilled
# - make LOGLEVEL configurable
import errno
import subprocess
import threading
import sys
import stat
import os
import re
import posixpath
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import time
import logging.handlers
try:
import Queue
except ImportError:
import queue as Queue
import optparse
import functools
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import daemonize
import svnpubsub.client
import svnpubsub.util
assert hasattr(subprocess, 'check_call')
def check_call(*args, **kwds):
"""Wrapper around subprocess.check_call() that logs stderr upon failure,
with an optional list of exit codes to consider non-failure."""
assert 'stderr' not in kwds
if '__okayexits' in kwds:
__okayexits = kwds['__okayexits']
del kwds['__okayexits']
else:
__okayexits = set([0]) # EXIT_SUCCESS
kwds.update(stderr=subprocess.PIPE)
pipe = subprocess.Popen(*args, **kwds)
output, errput = pipe.communicate()
if pipe.returncode not in __okayexits:
cmd = args[0] if len(args) else kwds.get('args', '(no command)')
# TODO: log stdout too?
logging.error('Command failed: returncode=%d command=%r stderr=%r',
pipe.returncode, cmd, errput)
raise subprocess.CalledProcessError(pipe.returncode, args)
return pipe.returncode # is EXIT_OK
### note: this runs synchronously. within the current Twisted environment,
### it is called from ._get_match() which is run on a thread so it won't
### block the Twisted main loop.
def svn_info(svnbin, env, path):
"Run 'svn info' on the target path, returning a dict of info data."
args = [svnbin, "info", "--non-interactive", "--", path]
output = svnpubsub.util.check_output(args, env=env).strip()
info = { }
for line in output.split('\n'):
idx = line.index(':')
info[line[:idx]] = line[idx+1:].strip()
return info
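# Illustrative output of svn_info() (the URL, UUID, and revision below are hypothetical);
# the keys consumed elsewhere in this file are 'Repository Root', 'URL', 'Repository UUID',
# and 'Revision':
#   svn_info('/usr/bin/svn', os.environ, '/srv/wc/site')
#   # -> {'URL': 'https://svn.example.org/repos/proj/site/trunk',
#   #     'Repository Root': 'https://svn.example.org/repos/proj',
#   #     'Repository UUID': '00000000-0000-0000-0000-000000000000',
#   #     'Revision': '12345', ...}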
try:
import glob
glob.iglob
def is_emptydir(path):
# ### If the directory contains only dotfile children, this will readdir()
# ### the entire directory. But os.readdir() is not exposed to us...
for x in glob.iglob('%s/*' % path):
return False
for x in glob.iglob('%s/.*' % path):
return False
return True
except (ImportError, AttributeError):
# Python ≤2.4
def is_emptydir(path):
# This will read the entire directory list to memory.
return not os.listdir(path)
class WorkingCopy(object):
def __init__(self, bdec, path, url):
self.path = path
self.url = url
try:
self.match, self.uuid = self._get_match(bdec.svnbin, bdec.env)
bdec.wc_ready(self)
except:
logging.exception('problem with working copy: %s', path)
def update_applies(self, uuid, path):
if self.uuid != uuid:
return False
path = str(path)
if path == self.match:
#print "ua: Simple match"
# easy case. woo.
return True
if len(path) < len(self.match):
# path is potentially a parent directory of match?
#print "ua: parent check"
if self.match[0:len(path)] == path:
return True
if len(path) > len(self.match):
# path is potentially a sub directory of match
#print "ua: sub dir check"
if path[0:len(self.match)] == self.match:
return True
return False
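# A quick sketch of the prefix matching above (paths are hypothetical): with self.match set to
# '/site/trunk', update_applies(uuid, '/site/trunk') matches exactly,
# update_applies(uuid, '/site/trunk/content/index.html') matches as a subdirectory, and
# update_applies(uuid, '/site') matches as a parent directory, while an unrelated prefix such
# as '/dev/other' does not match.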
def _get_match(self, svnbin, env):
### quick little hack to auto-checkout missing working copies
dotsvn = os.path.join(self.path, ".svn")
if not os.path.isdir(dotsvn) or is_emptydir(dotsvn):
logging.info("autopopulate %s from %s" % (self.path, self.url))
check_call([svnbin, 'co', '-q',
'--force',
'--non-interactive',
'--config-option',
'config:miscellany:use-commit-times=on',
'--', self.url, self.path],
env=env)
# Fetch the info for matching dirs_changed against this WC
info = svn_info(svnbin, env, self.path)
root = info['Repository Root']
url = info['URL']
relpath = url[len(root):] # also has leading '/'
uuid = info['Repository UUID']
return str(relpath), uuid
PRODUCTION_RE_FILTER = re.compile("/websites/production/[^/]+/")
class BigDoEverythingClasss(object):
def __init__(self, config):
self.svnbin = config.get_value('svnbin')
self.env = config.get_env()
self.tracking = config.get_track()
self.hook = config.get_optional_value('hook')
self.streams = config.get_value('streams').split()
self.worker = BackgroundWorker(self.svnbin, self.env, self.hook)
self.watch = [ ]
def start(self):
for path, url in self.tracking.items():
# working copies auto-register with the BDEC when they are ready.
WorkingCopy(self, path, url)
def wc_ready(self, wc):
# called when a working copy object has its basic info/url,
# Add it to our watchers, and trigger an svn update.
logging.info("Watching WC at %s <-> %s" % (wc.path, wc.url))
self.watch.append(wc)
self.worker.add_work(OP_BOOT, wc)
def _normalize_path(self, path):
if path[0] != '/':
return "/" + path
return posixpath.abspath(path)
def commit(self, url, commit):
if commit.type != 'svn' or commit.format != 1:
logging.info("SKIP unknown commit format (%s.%d)",
commit.type, commit.format)
return
logging.info("COMMIT r%d (%d paths) from %s"
% (commit.id, len(commit.changed), url))
paths = list(map(self._normalize_path, commit.changed))
if len(paths):
pre = posixpath.commonprefix(paths)
if pre == "/websites/":
# special case for svnmucc "dynamic content" buildbot commits
# just take the first production path to avoid updating all cms working copies
for p in paths:
m = PRODUCTION_RE_FILTER.match(p)
if m:
pre = m.group(0)
break
#print "Common Prefix: %s" % (pre)
wcs = [wc for wc in self.watch if wc.update_applies(commit.repository, pre)]
logging.info("Updating %d WC for r%d" % (len(wcs), commit.id))
for wc in wcs:
self.worker.add_work(OP_UPDATE, wc)
# Start logging warnings if the work backlog reaches this many items
BACKLOG_TOO_HIGH = 20
OP_BOOT = 'boot'
OP_UPDATE = 'update'
OP_CLEANUP = 'cleanup'
class BackgroundWorker(threading.Thread):
def __init__(self, svnbin, env, hook):
threading.Thread.__init__(self)
# The main thread/process should not wait for this thread to exit.
### compat with Python 2.5
self.setDaemon(True)
self.svnbin = svnbin
self.env = env
self.hook = hook
self.q = Queue.Queue()
self.has_started = False
def run(self):
while True:
# This will block until something arrives
operation, wc = self.q.get()
# Warn if the queue is too long.
# (Note: the other thread might have added entries to self.q
# after the .get() and before the .qsize().)
qsize = self.q.qsize()+1
if operation != OP_BOOT and qsize > BACKLOG_TOO_HIGH:
logging.warn('worker backlog is at %d', qsize)
try:
if operation == OP_UPDATE:
self._update(wc)
elif operation == OP_BOOT:
self._update(wc, boot=True)
elif operation == OP_CLEANUP:
self._cleanup(wc)
else:
logging.critical('unknown operation: %s', operation)
except:
logging.exception('exception in worker')
# In case we ever want to .join() against the work queue
self.q.task_done()
def add_work(self, operation, wc):
# Start the thread when work first arrives. Thread-start needs to
# be delayed in case the process forks itself to become a daemon.
if not self.has_started:
self.start()
self.has_started = True
self.q.put((operation, wc))
def _update(self, wc, boot=False):
"Update the specified working copy."
# For giggles, let's clean up the working copy in case something
# happened earlier.
self._cleanup(wc)
logging.info("updating: %s", wc.path)
## Run the hook
HEAD = svn_info(self.svnbin, self.env, wc.url)['Revision']
if self.hook:
hook_mode = ['pre-update', 'pre-boot'][boot]
logging.info('running hook: %s at %s',
wc.path, hook_mode)
args = [self.hook, hook_mode, wc.path, HEAD, wc.url]
rc = check_call(args, env=self.env, __okayexits=[0, 1])
if rc == 1:
# TODO: log stderr
logging.warn('hook denied update of %s at %s',
wc.path, hook_mode)
return
del rc
### we need to move some of these args into the config. these are
### still specific to the ASF setup.
args = [self.svnbin, 'switch',
'--quiet',
'--non-interactive',
'--trust-server-cert',
'--ignore-externals',
'--config-option',
'config:miscellany:use-commit-times=on',
'--',
wc.url + '@' + HEAD,
wc.path]
check_call(args, env=self.env)
### check the loglevel before running 'svn info'?
info = svn_info(self.svnbin, self.env, wc.path)
assert info['Revision'] == HEAD
logging.info("updated: %s now at r%s", wc.path, info['Revision'])
## Run the hook
if self.hook:
hook_mode = ['post-update', 'boot'][boot]
logging.info('running hook: %s at revision %s due to %s',
wc.path, info['Revision'], hook_mode)
args = [self.hook, hook_mode,
wc.path, info['Revision'], wc.url]
check_call(args, env=self.env)
def _cleanup(self, wc):
"Run a cleanup on the specified working copy."
### we need to move some of these args into the config. these are
### still specific to the ASF setup.
args = [self.svnbin, 'cleanup',
'--non-interactive',
'--trust-server-cert',
'--config-option',
'config:miscellany:use-commit-times=on',
wc.path]
check_call(args, env=self.env)
class ReloadableConfig(ConfigParser.SafeConfigParser):
def __init__(self, fname):
ConfigParser.SafeConfigParser.__init__(self)
self.fname = fname
self.read(fname)
### install a signal handler to set SHOULD_RELOAD. BDEC should
### poll this flag, and then adjust its internal structures after
### the reload.
self.should_reload = False
def reload(self):
# Delete everything. Just re-reading would overlay, and would not
# remove sections/options. Note that [DEFAULT] will not be removed.
for section in self.sections():
self.remove_section(section)
# Now re-read the configuration file.
self.read(self.fname)
def get_value(self, which):
return self.get(ConfigParser.DEFAULTSECT, which)
def get_optional_value(self, which, default=None):
if self.has_option(ConfigParser.DEFAULTSECT, which):
return self.get(ConfigParser.DEFAULTSECT, which)
else:
return default
def get_env(self):
env = os.environ.copy()
default_options = self.defaults().keys()
for name, value in self.items('env'):
if name not in default_options:
env[name] = value
return env
def get_track(self):
"Return the {PATH: URL} dictionary of working copies to track."
track = dict(self.items('track'))
for name in self.defaults().keys():
del track[name]
return track
def optionxform(self, option):
# Do not lowercase the option name.
return str(option)
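# A minimal svnwcsub.conf sketch, inferred from the accessors above (the option names are the
# ones this class reads; the paths and URLs are hypothetical):
#
#   [DEFAULT]
#   svnbin: /usr/bin/svn
#   streams: http://svn.example.org:2069/commits
#   hook: /usr/local/bin/wc-hook.sh
#
#   [env]
#   HOME: /home/svnwc
#
#   [track]
#   /srv/wc/site: https://svn.example.org/repos/proj/site/trunk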
class Daemon(daemonize.Daemon):
def __init__(self, logfile, pidfile, umask, bdec):
daemonize.Daemon.__init__(self, logfile, pidfile)
self.umask = umask
self.bdec = bdec
def setup(self):
# There is no setup which the parent needs to wait for.
pass
def run(self):
logging.info('svnwcsub started, pid=%d', os.getpid())
# Set the umask in the daemon process. Defaults to 000 for
# daemonized processes. Foreground processes simply inherit
# the value from the parent process.
if self.umask is not None:
umask = int(self.umask, 8)
os.umask(umask)
logging.info('umask set to %03o', umask)
# Start the BDEC (on the main thread), then start the client
self.bdec.start()
mc = svnpubsub.client.MultiClient(self.bdec.streams,
self.bdec.commit,
self._event)
mc.run_forever()
def _event(self, url, event_name, event_arg):
if event_name == 'error':
logging.exception('from %s', url)
elif event_name == 'ping':
logging.debug('ping from %s', url)
else:
logging.info('"%s" from %s', event_name, url)
def prepare_logging(logfile):
"Log to the specified file, or to stdout if None."
if logfile:
# Rotate logs daily, keeping 7 days worth.
handler = logging.handlers.TimedRotatingFileHandler(
logfile, when='midnight', backupCount=7,
)
else:
handler = logging.StreamHandler(sys.stdout)
# Add a timestamp to the log records
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s',
'%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
# Apply the handler to the root logger
root = logging.getLogger()
root.addHandler(handler)
### use logging.INFO for now. switch to cmdline option or a config?
root.setLevel(logging.INFO)
def handle_options(options):
# Set up the logging, then process the rest of the options.
prepare_logging(options.logfile)
# In daemon mode, we let the daemonize module handle the pidfile.
# Otherwise, we should write this (foreground) PID into the file.
if options.pidfile and not options.daemon:
pid = os.getpid()
# Be wary of symlink attacks
try:
os.remove(options.pidfile)
except OSError:
pass
fd = os.open(options.pidfile, os.O_WRONLY | os.O_CREAT | os.O_EXCL,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
os.write(fd, '%d\n' % pid)
os.close(fd)
logging.info('pid %d written to %s', pid, options.pidfile)
if options.gid:
try:
gid = int(options.gid)
except ValueError:
import grp
gid = grp.getgrnam(options.gid)[2]
logging.info('setting gid %d', gid)
os.setgid(gid)
if options.uid:
try:
uid = int(options.uid)
except ValueError:
import pwd
uid = pwd.getpwnam(options.uid)[2]
logging.info('setting uid %d', uid)
os.setuid(uid)
def main(args):
parser = optparse.OptionParser(
description='An SvnPubSub client to keep working copies synchronized '
'with a repository.',
usage='Usage: %prog [options] CONFIG_FILE',
)
parser.add_option('--logfile',
help='filename for logging')
parser.add_option('--pidfile',
help="the process' PID will be written to this file")
parser.add_option('--uid',
help='switch to this UID before running')
parser.add_option('--gid',
help='switch to this GID before running')
parser.add_option('--umask',
help='set this (octal) umask before running')
parser.add_option('--daemon', action='store_true',
help='run as a background daemon')
options, extra = parser.parse_args(args)
if len(extra) != 1:
parser.error('CONFIG_FILE is required')
config_file = extra[0]
if options.daemon and not options.logfile:
parser.error('LOGFILE is required when running as a daemon')
if options.daemon and not options.pidfile:
parser.error('PIDFILE is required when running as a daemon')
# Process any provided options.
handle_options(options)
c = ReloadableConfig(config_file)
bdec = BigDoEverythingClasss(c)
# We manage the logfile ourselves (along with possible rotation). The
# daemon process can just drop stdout/stderr into /dev/null.
d = Daemon('/dev/null', os.path.abspath(options.pidfile),
options.umask, bdec)
if options.daemon:
# Daemonize the process and call sys.exit() with appropriate code
d.daemonize_exit()
else:
# Just run in the foreground (the default)
d.foreground()
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_0_20435 | import argparse
from tensorlayer.cli import train
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='tl')
subparsers = parser.add_subparsers(dest='cmd')
train_parser = subparsers.add_parser('train', help='train a model using multiple local GPUs or CPUs.')
train.build_arg_parser(train_parser)
args = parser.parse_args()
if args.cmd == 'train':
train.main(args)
else:
parser.print_help()
|
the-stack_0_20437 | import ast
import copy
import datetime
import os
import pathlib
from typing import AnyStr, Literal, Optional, Union
import discord
from discord.ext import commands
def insert_returns(body):
# insert a return stmt if the last expression is an expression statement
if isinstance(body[-1], ast.Expr):
body[-1] = ast.Return(body[-1].value)
ast.fix_missing_locations(body[-1])
# for if statements, we insert returns into the body and the orelse
if isinstance(body[-1], ast.If):
insert_returns(body[-1].body)
insert_returns(body[-1].orelse)
# for with blocks, again we insert returns into the body
if isinstance(body[-1], ast.With):
insert_returns(body[-1].body)
async def copy_context_with(ctx: commands.Context,
*,
author=None,
channel=None,
**kwargs):
alt_message: discord.Message = copy.copy(ctx.message)
alt_message._update(kwargs) # pylint: disable=protected-access
if author is not None:
alt_message.author = author
if channel is not None:
alt_message.channel = channel
# obtain and return a context of the same type
return await ctx.bot.get_context(alt_message, cls=type(ctx))
def check_if_user_joined_a_voice(ctx):
'''Checks whether the command author is connected to a regular voice channel'''
voice_state_author = ctx.author.voice
if voice_state_author is None or not isinstance(voice_state_author.channel, discord.VoiceChannel):
return False
return True
def check_if_user_joined_a_stage(ctx):
'''Checks whether the command author is connected to a stage channel'''
voice_state_author = ctx.author.voice
if voice_state_author is None or not isinstance(voice_state_author.channel, discord.StageChannel):
return False
return True
async def get_welcome_channel(guild: discord.Guild, bot: discord.Client, inviter_or_guild_owner: discord.User):
try:
return guild.system_channel
except:
try:
text_channels_list = guild.text_channels
for i in text_channels_list:
if i.permissions_for(bot.user).send_messages:
return i
except:
return inviter_or_guild_owner
# R.Danny Code
class plural:
def __init__(self, value):
self.value = value
def __format__(self, format_spec):
v = self.value
singular, sep, plural = format_spec.partition("|")
plural = plural or f"{singular}s"
if abs(v) != 1:
return f"{v} {plural}"
return f"{v} {singular}"
# R.Danny Code
def human_join(seq, delim=", ", final="or"):
size = len(seq)
if size == 0:
return ""
if size == 1:
return seq[0]
if size == 2:
return f"{seq[0]} {final} {seq[1]}"
return delim.join(seq[:-1]) + f" {final} {seq[-1]}"
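# Illustrative usage of the two helpers above (the words are arbitrary examples):
#   f"{plural(1):reply}"                         # -> "1 reply"
#   f"{plural(3):reply|replies}"                 # -> "3 replies"
#   human_join(["red", "green", "blue"])         # -> "red, green or blue"
#   human_join(["read", "write"], final="and")   # -> "read and write"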
def secure_delete(path: Union[AnyStr, pathlib.Path], passes: int = 3) -> None:
"""
Overwrite the file with random data for the given number of passes, then zero it out and delete it.
Opening the file in append mode only to find its length, and then reopening it in r+ (binary) mode,
matters here: writing in append mode cannot seek back to offset 0, so each pass would simply be
appended after the original contents ([Original Contents][Random Data][Random Data]...) instead of
overwriting them, which is not the desired effect.
Adapted (with some type hinting changes) from Stack Overflow:
https://stackoverflow.com/questions/17455300/python-securely-remove-file
"""
with open(path, "ba+", buffering=0) as delfile:
length: int = delfile.tell()
delfile.close()
with open(path, "br+", buffering=0) as delfile:
for i in range(passes):
delfile.seek(0, 0)
delfile.write(os.urandom(length))
delfile.seek(0)
for x in range(length):
delfile.write(b"\x00")
os.remove(path)
# R.Danny Code
def format_dt(dt,
style: Optional[str] = None,
ist: Optional[Union[bool, Literal[False]]] = False):
if dt.tzinfo is None and not ist:
dt = dt.replace(tzinfo=datetime.timezone.utc)
if ist:
timezone = datetime.timezone(datetime.timedelta(hours=5, minutes=30))
dt = dt.replace(tzinfo=timezone)
if style is None:
return f"<t:{int(dt.timestamp())}>"
return f"<t:{int(dt.timestamp())}:{style}>"
|
the-stack_0_20438 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Estimate fieldmaps for :abbr:`SDC (susceptibility distortion correction)`."""
from nipype import logging
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
LOGGER = logging.getLogger("nipype.workflow")
def init_brainextraction_wf(name="brainextraction_wf"):
"""
Remove nonbrain tissue from images.
Parameters
----------
name : :obj:`str`, optional
Workflow name (default: ``"brainextraction_wf"``)
Inputs
------
in_file : :obj:`str`
the GRE magnitude or EPI reference to be brain-extracted
bspline_dist : :obj:`int`, optional
Integer to replace default distance of b-spline separation for N4
Outputs
-------
out_file : :obj:`str`
the input file after N4 and smart clipping
out_brain : :obj:`str`
the output file, just the brain extracted
out_mask : :obj:`str`
the calculated mask
out_probseg : :obj:`str`
a probability map that the random walker reached
a given voxel (some sort of "soft" brainmask)
"""
from nipype.interfaces.ants import N4BiasFieldCorrection
from ..interfaces.brainmask import BrainExtraction
from ..interfaces.utils import IntensityClip
wf = Workflow(name=name)
inputnode = pe.Node(
niu.IdentityInterface(fields=("in_file", "bspline_dist")), name="inputnode"
)
outputnode = pe.Node(
niu.IdentityInterface(
fields=(
"out_file",
"out_brain",
"out_mask",
"out_probseg",
)
),
name="outputnode",
)
clipper_pre = pe.Node(IntensityClip(), name="clipper_pre")
# de-gradient the fields ("bias/illumination artifact")
n4 = pe.Node(
N4BiasFieldCorrection(
dimension=3,
copy_header=True,
n_iterations=[50] * 5,
convergence_threshold=1e-7,
shrink_factor=4,
),
n_procs=8,
name="n4",
)
clipper_post = pe.Node(IntensityClip(p_max=100.0), name="clipper_post")
masker = pe.Node(BrainExtraction(), name="masker")
# fmt:off
wf.connect([
(inputnode, clipper_pre, [("in_file", "in_file")]),
(inputnode, n4, [("bspline_dist", "bspline_fitting_distance")]),
(clipper_pre, n4, [("out_file", "input_image")]),
(n4, clipper_post, [("output_image", "in_file")]),
(clipper_post, masker, [("out_file", "in_file")]),
(clipper_post, outputnode, [("out_file", "out_file")]),
(masker, outputnode, [("out_file", "out_brain"),
("out_mask", "out_mask"),
("out_probseg", "out_probseg")]),
])
# fmt:on
return wf
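# Illustrative use of the factory above (the magnitude file name and the call to run() are
# hypothetical usage, not part of this module):
#   wf = init_brainextraction_wf(name="brainextraction_wf")
#   wf.inputs.inputnode.in_file = "sub-01_magnitude1.nii.gz"
#   wf.run()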
|
the-stack_0_20440 | import os
import os.path
from datetime import datetime
from flask import (
abort,
Blueprint,
jsonify,
request,
Response,
)
from werkzeug.utils import secure_filename
from model import db, TextResource, ImageResource
from config import BASE_HOST, UPLOAD_FOLDER
api = Blueprint('api', __name__, url_prefix="")
@api.route('/', methods=['GET'])
def index():
return "hello,world"
@api.route('/', methods=['POST'])
def upload_resource():
if 'file' not in request.files:
abort(Response("Missing file", 400))
file = request.files['file']
if file.filename == '':
abort(Response("Empty filename", 400))
filename = secure_filename(file.filename)
# temporarily save file
file_path = os.path.join(UPLOAD_FOLDER, filename)
file.save(file_path)
extension = os.path.splitext(filename)[-1]
print(extension)
if extension == '.txt':
with open(file_path, encoding='UTF-8') as file:
text = file.read()
res = save_text(text)
if res != -1:
res = BASE_HOST + "/text/" + str(res)
return jsonify(res)
else:
return ""
if extension == '.jpg':
with open(file_path, 'rb') as file:
img = file.read()
res = save_image(img)
if res != -1:
res = BASE_HOST + "/image/" + str(res)
return jsonify(res)
else:
return ""
return ""
@api.route('/image/<int:id>', methods=['GET'])
def get_image(id):
image = ImageResource.query.get(id)
resp = Response(image.resource, mimetype="image/jpeg")
return resp
@api.route('/text/<int:id>', methods=['GET'])
def get_text(id):
text = TextResource.query.get(id)
print(text.to_dict())
return jsonify(text.resource)
@api.route('/text', methods=['POST'])
def create_text():
res = -1
if request.content_type.startswith("application/json"):
content = request.json
text = content.get('text')
res = save_text(text)
if res != -1:
res = BASE_HOST + "/text/" + str(res)
return jsonify(res)
else:
return ""
def save_text(text):
text_resource = TextResource(resource=text)
db.session.add(text_resource)
db.session.commit()
db.session.refresh(text_resource)
res = text_resource.id
return res
def save_image(image):
image_resource = ImageResource(resource=image)
db.session.add(image_resource)
db.session.commit()
db.session.refresh(image_resource)
return image_resource.id |
the-stack_0_20442 | """
Resources:
- https://en.wikipedia.org/wiki/Conjugate_gradient_method
- https://en.wikipedia.org/wiki/Definite_symmetric_matrix
"""
import numpy as np
def _is_matrix_spd(matrix: np.array) -> bool:
"""
Returns True if input matrix is symmetric positive definite.
Returns False otherwise.
For a matrix to be SPD, all eigenvalues must be positive.
>>> import numpy as np
>>> matrix = np.array([
... [4.12401784, -5.01453636, -0.63865857],
... [-5.01453636, 12.33347422, -3.40493586],
... [-0.63865857, -3.40493586, 5.78591885]])
>>> _is_matrix_spd(matrix)
True
>>> matrix = np.array([
... [0.34634879, 1.96165514, 2.18277744],
... [0.74074469, -1.19648894, -1.34223498],
... [-0.7687067 , 0.06018373, -1.16315631]])
>>> _is_matrix_spd(matrix)
False
"""
# Ensure matrix is square.
assert np.shape(matrix)[0] == np.shape(matrix)[1]
# If matrix not symmetric, exit right away.
if np.allclose(matrix, matrix.T) is False:
return False
# Get eigenvalues and eigenvectors for a symmetric matrix.
eigen_values, _ = np.linalg.eigh(matrix)
# Check sign of all eigenvalues.
return np.all(eigen_values > 0)
def _create_spd_matrix(dimension: np.int64) -> np.array:
"""
Returns a symmetric positive definite matrix given a dimension.
Input:
dimension gives the square matrix dimension.
Output:
spd_matrix is a dimension x dimension symmetric positive definite (SPD) matrix.
>>> import numpy as np
>>> dimension = 3
>>> spd_matrix = _create_spd_matrix(dimension)
>>> _is_matrix_spd(spd_matrix)
True
"""
random_matrix = np.random.randn(dimension, dimension)
spd_matrix = np.dot(random_matrix, random_matrix.T)
assert _is_matrix_spd(spd_matrix)
return spd_matrix
def conjugate_gradient(
spd_matrix: np.array,
load_vector: np.array,
max_iterations: int = 1000,
tol: float = 1e-8,
) -> np.array:
"""
Returns solution to the linear system np.dot(spd_matrix, x) = b.
Input:
spd_matrix is an NxN Symmetric Positive Definite (SPD) matrix.
load_vector is an Nx1 vector.
Output:
x is an Nx1 vector that is the solution vector.
>>> import numpy as np
>>> spd_matrix = np.array([
... [8.73256573, -5.02034289, -2.68709226],
... [-5.02034289, 3.78188322, 0.91980451],
... [-2.68709226, 0.91980451, 1.94746467]])
>>> b = np.array([
... [-5.80872761],
... [ 3.23807431],
... [ 1.95381422]])
>>> conjugate_gradient(spd_matrix, b)
array([[-0.63114139],
[-0.01561498],
[ 0.13979294]])
"""
# Ensure proper dimensionality.
assert np.shape(spd_matrix)[0] == np.shape(spd_matrix)[1]
assert np.shape(load_vector)[0] == np.shape(spd_matrix)[0]
assert _is_matrix_spd(spd_matrix)
# Initialize solution guess, residual, search direction.
x0 = np.zeros((np.shape(load_vector)[0], 1))
r0 = np.copy(load_vector)
p0 = np.copy(r0)
# Set initial errors in solution guess and residual.
error_residual = 1e9
error_x_solution = 1e9
error = 1e9
# Initialize the iteration counter and stop once the tolerance or max_iterations is reached.
iterations = 0
while error > tol and iterations < max_iterations:
# Save this value so we only calculate the matrix-vector product once.
w = np.dot(spd_matrix, p0)
# The main algorithm.
# Update search direction magnitude.
alpha = np.dot(r0.T, r0) / np.dot(p0.T, w)
# Update solution guess.
x = x0 + alpha * p0
# Calculate new residual.
r = r0 - alpha * w
# Calculate new Krylov subspace scale.
beta = np.dot(r.T, r) / np.dot(r0.T, r0)
# Calculate the new A-conjugate search direction.
p = r + beta * p0
# Calculate errors.
error_residual = np.linalg.norm(r - r0)
error_x_solution = np.linalg.norm(x - x0)
error = np.maximum(error_residual, error_x_solution)
# Update variables.
x0 = np.copy(x)
r0 = np.copy(r)
p0 = np.copy(p)
# Update number of iterations.
iterations += 1
return x
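# For reference, the loop above implements the standard conjugate gradient recurrences
# (cf. the Wikipedia article cited at the top of this file):
#   alpha_k = (r_k^T r_k) / (p_k^T A p_k)
#   x_{k+1} = x_k + alpha_k * p_k
#   r_{k+1} = r_k - alpha_k * A p_k
#   beta_k  = (r_{k+1}^T r_{k+1}) / (r_k^T r_k)
#   p_{k+1} = r_{k+1} + beta_k * p_k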
def test_conjugate_gradient() -> None:
"""
>>> test_conjugate_gradient() # self running tests
"""
# Create linear system with SPD matrix and known solution x_true.
dimension = 3
spd_matrix = _create_spd_matrix(dimension)
x_true = np.random.randn(dimension, 1)
b = np.dot(spd_matrix, x_true)
# Numpy solution.
x_numpy = np.linalg.solve(spd_matrix, b)
# Our implementation.
x_conjugate_gradient = conjugate_gradient(spd_matrix, b)
# Ensure both solutions are close to x_true (and therefore one another).
assert np.linalg.norm(x_numpy - x_true) <= 1e-6
assert np.linalg.norm(x_conjugate_gradient - x_true) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_conjugate_gradient()
|
the-stack_0_20443 | """
db of Lin
~~~~~~~~~
:copyright: © 2018 by the Lin team.
:license: MIT, see LICENSE for more details.
"""
from flask_sqlalchemy import SQLAlchemy as _SQLAlchemy, BaseQuery
from sqlalchemy import inspect, orm, func
from contextlib import contextmanager
from .exception import NotFound
class SQLAlchemy(_SQLAlchemy):
@contextmanager
def auto_commit(self):
try:
yield
self.session.commit()
except Exception as e:
self.session.rollback()
raise e
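# Illustrative use of the auto_commit() context manager above (the User model is hypothetical):
#   with db.auto_commit():
#       user = User(nickname='demo')
#       db.session.add(user)
# The session is committed when the block exits normally and rolled back (with the exception
# re-raised) if the block raises.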
class Query(BaseQuery):
def filter_by(self, soft=False, **kwargs):
# soft: apply soft-delete filtering (only keep rows whose delete_time is NULL)
if soft:
kwargs['delete_time'] = None
return super(Query, self).filter_by(**kwargs)
def get_or_404(self, ident):
rv = self.get(ident)
if not rv:
raise NotFound()
return rv
def first_or_404(self):
rv = self.first()
if not rv:
raise NotFound()
return rv
db = SQLAlchemy(query_class=Query)
def get_total_nums(cls, is_soft=False, **kwargs):
nums = db.session.query(func.count(cls.id))
nums = nums.filter(cls.delete_time == None).filter_by(**kwargs).scalar() if is_soft else nums.filter_by(**kwargs).scalar()
if nums:
return nums
else:
return 0
class MixinJSONSerializer:
@orm.reconstructor
def init_on_load(self):
self._fields = []
self._exclude = []
self._set_fields()
self.__prune_fields()
def _set_fields(self):
pass
def __prune_fields(self):
columns = inspect(self.__class__).columns
if not self._fields:
all_columns = set([column.name for column in columns])
self._fields = list(all_columns - set(self._exclude))
def hide(self, *args):
for key in args:
self._fields.remove(key)
return self
def keys(self):
return self._fields
def __getitem__(self, key):
return getattr(self, key)
|
the-stack_0_20444 | import torch
from extensions.gridding import Gridding, GriddingReverse
from extensions.cubic_feature_sampling import CubicFeatureSampling
class RandomPointSampling(torch.nn.Module):
def __init__(self, n_points):
super(RandomPointSampling, self).__init__()
self.n_points = n_points
def forward(self, pred_cloud, partial_cloud=None):
if partial_cloud is not None:
pred_cloud = torch.cat([partial_cloud, pred_cloud], dim=1)
_ptcloud = torch.split(pred_cloud, 1, dim=0)
ptclouds = []
for p in _ptcloud:
non_zeros = torch.sum(p, dim=2).ne(0)
p = p[non_zeros].unsqueeze(dim=0)
n_pts = p.size(1)
if n_pts < self.n_points:
rnd_idx = torch.cat([torch.randint(0, n_pts, (self.n_points, ))])
else:
rnd_idx = torch.randperm(p.size(1))[:self.n_points]
ptclouds.append(p[:, rnd_idx, :])
return torch.cat(ptclouds, dim=0).contiguous()
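# A shape sketch for the sampler above (the tensors are random and purely illustrative; the
# sizes mirror the comments in GRNet.forward below):
#   sampler = RandomPointSampling(n_points=2048)
#   pred = torch.rand(1, 262144, 3)      # dense point cloud from the reverse gridding
#   partial = torch.rand(1, 2048, 3)     # observed partial cloud
#   sampler(pred, partial).shape         # -> torch.Size([1, 2048, 3])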
class GRNet(torch.nn.Module):
def __init__(self):
super(GRNet, self).__init__()
self.gridding = Gridding(scale=64)
self.conv1 = torch.nn.Sequential(
torch.nn.Conv3d(1, 32, kernel_size=4, padding=2),
torch.nn.BatchNorm3d(32),
torch.nn.LeakyReLU(0.2),
torch.nn.MaxPool3d(kernel_size=2)
)
self.conv2 = torch.nn.Sequential(
torch.nn.Conv3d(32, 64, kernel_size=4, padding=2),
torch.nn.BatchNorm3d(64),
torch.nn.LeakyReLU(0.2),
torch.nn.MaxPool3d(kernel_size=2)
)
self.conv3 = torch.nn.Sequential(
torch.nn.Conv3d(64, 128, kernel_size=4, padding=2),
torch.nn.BatchNorm3d(128),
torch.nn.LeakyReLU(0.2),
torch.nn.MaxPool3d(kernel_size=2)
)
self.conv4 = torch.nn.Sequential(
torch.nn.Conv3d(128, 256, kernel_size=4, padding=2),
torch.nn.BatchNorm3d(256),
torch.nn.LeakyReLU(0.2),
torch.nn.MaxPool3d(kernel_size=2)
)
self.fc5 = torch.nn.Sequential(
torch.nn.Linear(16384, 2048),
torch.nn.ReLU()
)
self.fc6 = torch.nn.Sequential(
torch.nn.Linear(2048, 16384),
torch.nn.ReLU()
)
self.dconv7 = torch.nn.Sequential(
torch.nn.ConvTranspose3d(256, 128, kernel_size=4, stride=2, bias=False, padding=1),
torch.nn.BatchNorm3d(128),
torch.nn.ReLU()
)
self.dconv8 = torch.nn.Sequential(
torch.nn.ConvTranspose3d(128, 64, kernel_size=4, stride=2, bias=False, padding=1),
torch.nn.BatchNorm3d(64),
torch.nn.ReLU()
)
self.dconv9 = torch.nn.Sequential(
torch.nn.ConvTranspose3d(64, 32, kernel_size=4, stride=2, bias=False, padding=1),
torch.nn.BatchNorm3d(32),
torch.nn.ReLU()
)
self.dconv10 = torch.nn.Sequential(
torch.nn.ConvTranspose3d(32, 1, kernel_size=4, stride=2, bias=False, padding=1),
torch.nn.BatchNorm3d(1),
torch.nn.ReLU()
)
self.gridding_rev = GriddingReverse(scale=64)
self.point_sampling = RandomPointSampling(n_points=2048)
self.feature_sampling = CubicFeatureSampling()
self.fc11 = torch.nn.Sequential(
torch.nn.Linear(1792, 1792),
torch.nn.ReLU()
)
self.fc12 = torch.nn.Sequential(
torch.nn.Linear(1792, 448),
torch.nn.ReLU()
)
self.fc13 = torch.nn.Sequential(
torch.nn.Linear(448, 112),
torch.nn.ReLU()
)
self.fc14 = torch.nn.Linear(112, 24)
#self.load_state_dict(torch.load('/home/hqu/PycharmProjects/semester_project_cvlab-master2/EPNet-master/EPNet-master/model/GRNet-KITTI.pth', map_location=lambda storage, loc: storage.cuda(0)))
def forward(self, data):
with torch.no_grad():
partial_cloud = data['partial_cloud']
#print(partial_cloud.size()) # torch.Size([batch_size, 2048, 3])
pt_features_64_l = self.gridding(partial_cloud).view(-1, 1, 64, 64, 64)
#print(pt_features_64_l.size()) # torch.Size([batch_size, 1, 64, 64, 64])
pt_features_32_l = self.conv1(pt_features_64_l)
# print(pt_features_32_l.size()) # torch.Size([batch_size, 32, 32, 32, 32])
pt_features_16_l = self.conv2(pt_features_32_l)
# print(pt_features_16_l.size()) # torch.Size([batch_size, 64, 16, 16, 16])
pt_features_8_l = self.conv3(pt_features_16_l)
# print(pt_features_8_l.size()) # torch.Size([batch_size, 128, 8, 8, 8])
pt_features_4_l = self.conv4(pt_features_8_l)
# print(pt_features_4_l.size()) # torch.Size([batch_size, 256, 4, 4, 4])
features = self.fc5(pt_features_4_l.view(-1, 16384))
# print(features.size()) # torch.Size([batch_size, 2048])
pt_features_4_r = self.fc6(features).view(-1, 256, 4, 4, 4) + pt_features_4_l
# print(pt_features_4_r.size()) # torch.Size([batch_size, 256, 4, 4, 4])
pt_features_8_r = self.dconv7(pt_features_4_r) + pt_features_8_l
# print(pt_features_8_r.size()) # torch.Size([batch_size, 128, 8, 8, 8])
pt_features_16_r = self.dconv8(pt_features_8_r) + pt_features_16_l
# print(pt_features_16_r.size()) # torch.Size([batch_size, 64, 16, 16, 16])
pt_features_32_r = self.dconv9(pt_features_16_r) + pt_features_32_l
# print(pt_features_32_r.size()) # torch.Size([batch_size, 32, 32, 32, 32])
pt_features_64_r = self.dconv10(pt_features_32_r) + pt_features_64_l
# print(pt_features_64_r.size()) # torch.Size([batch_size, 1, 64, 64, 64])
sparse_cloud = self.gridding_rev(pt_features_64_r.squeeze(dim=1))
# print(sparse_cloud.size()) # torch.Size([batch_size, 262144, 3])
sparse_cloud = self.point_sampling(sparse_cloud, partial_cloud)
# print(sparse_cloud.size()) # torch.Size([batch_size, 2048, 3])
point_features_32 = self.feature_sampling(sparse_cloud, pt_features_32_r).view(-1, 2048, 256)
# print(point_features_32.size()) # torch.Size([batch_size, 2048, 256])
point_features_16 = self.feature_sampling(sparse_cloud, pt_features_16_r).view(-1, 2048, 512)
# print(point_features_16.size()) # torch.Size([batch_size, 2048, 512])
point_features_8 = self.feature_sampling(sparse_cloud, pt_features_8_r).view(-1, 2048, 1024)
# print(point_features_8.size()) # torch.Size([batch_size, 2048, 1024])
point_features = torch.cat([point_features_32, point_features_16, point_features_8], dim=2)
# print(point_features.size()) # torch.Size([batch_size, 2048, 1792])
point_features = self.fc11(point_features)
# print(point_features.size()) # torch.Size([batch_size, 2048, 1792])
point_features = self.fc12(point_features)
# print(point_features.size()) # torch.Size([batch_size, 2048, 448])
point_features = self.fc13(point_features)
# print(point_features.size()) # torch.Size([batch_size, 2048, 112])
point_offset = self.fc14(point_features).view(-1, 16384, 3)
# print(point_features.size()) # torch.Size([batch_size, 16384, 3])
dense_cloud = sparse_cloud.unsqueeze(dim=2).repeat(1, 1, 8, 1).view(-1, 16384, 3) + point_offset
# print(dense_cloud.size()) # torch.Size([batch_size, 16384, 3])
return sparse_cloud, dense_cloud
|
the-stack_0_20446 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import requests
import json
import base64
import sys
from datetime import datetime
reload(sys)
sys.setdefaultencoding('utf-8') # pylint: disable=E1101
requests.packages.urllib3.disable_warnings()
URL = demisto.getParam('server')
if URL[-1] != '/':
URL += '/'
if not demisto.getParam('proxy'):
os.environ.pop('HTTP_PROXY', None)
os.environ.pop('HTTPS_PROXY', None)
os.environ.pop('http_proxy', None)
os.environ.pop('https_proxy', None)
VALIDATE_CERT = not demisto.params().get('insecure', True)
id_and_api_key = demisto.getParam('credentials')['identifier'] + ':' + demisto.getParam('credentials')['password']
encoded_auth_key = base64.b64encode(id_and_api_key.encode("utf-8"))
mssp_account_id = demisto.getParam('mssp_sub_account_id')
HEADERS = {'Authorization': 'Basic {}'.format(encoded_auth_key.decode()), 'Content-Type': 'application/json',
'Account-Id': demisto.getParam('credentials')['identifier']}
# Change the Account-Id to the sub account id, so all actions will be on the sub account.
if mssp_account_id:
HEADERS['Account-Id'] = mssp_account_id
IOC_TYPE_TO_DBOT_TYPE = {
'IpAddresses': 'ip',
'Urls': 'url',
'Domains': 'domain',
'Hashes': 'hash'
}
DEFAULT_TIME_RANGE = '1 day'
SEVERITY_LEVEL = {
'All': 0,
'Low': 1,
'Medium': 2,
'High': 3
}
def req(method, path, json_data=None, params=None, json_response=False):
"""
Send the request to IntSights and return the JSON response
"""
r = requests.request(method, URL + path, headers=HEADERS, json=json_data, params=params, verify=VALIDATE_CERT)
if r.status_code < 200 or r.status_code > 299:
if not (r.text == 'SeverityNotChanged' or r.text == 'TagExist' or r.text == 'IocBlocklistStatusNotChanged'):
return_error('Error in API call to IntSights service %s - [%d] %s' % (path, r.status_code, r.text))
if r.status_code == 204:
return [] # type: ignore
if json_response:
try:
return r.json()
except ValueError:
return_error('Error in API call to IntSights service - check your configured URL address')
return r
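# Illustrative call through the wrapper above (the path and parameter mirror ones used later in
# this file; the response shape depends on the IntSights API):
#   alert_ids = req('GET', 'public/v1/data/alerts/alerts-list',
#                   params={'foundDateFrom': 1609459200000}, json_response=True)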
def convert_iso_string_to_python_date(date_in_iso_format):
iso_format = "%Y-%m-%dT%H:%M:%S"
date_in_python_format = datetime.strptime(date_in_iso_format, iso_format)
return date_in_python_format
def convert_python_date_to_unix_millisecond(python_date_object):
timestamp_in_unix_millisecond = date_to_timestamp(python_date_object, 'datetime.datetime')
return timestamp_in_unix_millisecond
def increase_iso_by_x_days(date_in_iso_format, num_of_days):
date_in_python_format = convert_iso_string_to_python_date(date_in_iso_format)
new_date_in_python_format = date_in_python_format + timedelta(days=int(num_of_days))
new_date_in_iso_format = new_date_in_python_format.isoformat()
return new_date_in_iso_format
def remove_milliseconds_from_iso(date_in_iso_format):
date_parts_arr = date_in_iso_format.split('.')
date_in_iso_without_milliseconds = date_parts_arr[0]
return date_in_iso_without_milliseconds
def increase_timestamp_by_x_days(date_in_unix_ms_timestamp, num_of_days):
date_in_iso = timestamp_to_datestring(date_in_unix_ms_timestamp)
date_in_iso_without_ms = remove_milliseconds_from_iso(date_in_iso)
date_in_iso_plus_x_days = increase_iso_by_x_days(date_in_iso_without_ms, num_of_days)
timestamp_in_unix_ms_plus_x_days = date_to_timestamp(date_in_iso_plus_x_days)
return timestamp_in_unix_ms_plus_x_days
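# Worked example for the date helpers above (timestamps are Unix milliseconds; the values are
# illustrative): increase_timestamp_by_x_days(1609459200000, 1) converts 2021-01-01T00:00:00
# to ISO, adds one day, and returns 1609545600000 (2021-01-02T00:00:00); callers below pass a
# negative num_of_days to step back in time.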
def update_params_with_end_and_start_date(params, oldest_day_to_search_in_unix_timestamp, now_date_in_unix_timestamp):
params['foundDateFrom'] = oldest_day_to_search_in_unix_timestamp
params['foundDateTo'] = now_date_in_unix_timestamp
params['sourceDateFrom'] = oldest_day_to_search_in_unix_timestamp
params['sourceDateTo'] = now_date_in_unix_timestamp
def update_params_with_delta_arg(params, time_delta_in_days_int):
now_date_in_iso = datetime.utcnow().isoformat()
now_date_in_iso_without_ms = remove_milliseconds_from_iso(now_date_in_iso)
now_date_in_unix_timestamp = date_to_timestamp(now_date_in_iso_without_ms)
oldest_day_to_search_in_unix_timestamp = increase_timestamp_by_x_days(now_date_in_unix_timestamp,
-1 * time_delta_in_days_int)
update_params_with_end_and_start_date(params, oldest_day_to_search_in_unix_timestamp, now_date_in_unix_timestamp)
del params['time-delta']
def update_params_dict_according_to_delta_arg(params, time_delta_in_days_int):
if 'foundDateFrom' in params or 'foundDateTo' in params:
demisto.debug(
"ERROR in get_alerts() - can't use found-date-to or found-date-from arguments with time-delta argument")
return_error("Error: can't assign delta when assigned both found-date-to or found-date-from")
else:
update_params_with_delta_arg(params, time_delta_in_days_int)
return params
def handle_filters(foundDateFrom=None):
"""
Apply filters to alert list
"""
argsConversion = {
'alert-type': 'alertType',
'source-type': 'sourceType',
'network-type': 'networkType',
'source-date-from': 'sourceDateFrom',
'source-date-to': 'sourceDateTo',
'found-date-from': 'foundDateFrom',
'found-date-to': 'foundDateTo',
'is-flagged': 'isFlagged',
'is-closed': 'isClosed',
'source-ID': 'sourceId',
'first-seen-from': 'firstSeenFrom',
'first-seen-to': 'firstSeenTo',
'last-seen-from': 'lastSeenFrom',
'last-seen-to': 'lastSeenTo',
'value': 'iocValue',
}
params = {}
for k in demisto.args():
if demisto.getArg(k):
params[argsConversion.get(k) or k] = demisto.getArg(k)
if demisto.getArg('time-delta'):
time_delta_in_days = demisto.getArg('time-delta')
update_params_dict_according_to_delta_arg(params, int(time_delta_in_days))
elif foundDateFrom:
params['foundDateFrom'] = foundDateFrom
return params
def get_alerts_helper(params):
demisto.info("Executing get_alerts with params: {}".format(params))
resp = req('GET', 'public/v1/data/alerts/alerts-list', params=params, json_response=True)
alerts_HR = []
alerts_ctx = []
for alert_id in resp:
alert_informationHR, alert_informationCtx = get_alert_by_id_helper(alert_id)
alerts_HR.append(alert_informationHR)
alerts_ctx.append(alert_informationCtx)
return alerts_HR, alerts_ctx
def extract_mail(replies):
mails = []
for reply in replies:
mails.append(reply.get('Email'))
return '\n'.join(mails)
def extract_remediation(remediations):
remedies = []
string_format = "{0} - Status: {1}"
for remedy in remediations:
remedies.append(string_format.format(remedy.get('Value'), remedy.get('Status')))
return '\n'.join(remedies)
def hash_identifier(hash_val):
if md5Regex.match(hash_val):
return 'MD5'
elif sha1Regex.match(hash_val):
return 'SHA1'
elif sha256Regex.match(hash_val):
return 'SHA256'
return 'Unknown'
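# Example for the helper above (the digest below is the well-known MD5 of the empty string):
#   hash_identifier('d41d8cd98f00b204e9800998ecf8427e')   # -> 'MD5'
#   hash_identifier('not-a-hash')                         # -> 'Unknown'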
def extract_tags(tags):
pretty_tags = []
string_format = "ID: {0} - Name: {1}"
for tag in tags:
pretty_tags.append(string_format.format(tag.get('_id'), tag.get('Name')))
return pretty_tags
def get_alerts():
"""
Gets all alerts and returns as a list.
"""
alerts_HR, alerts_ctx = get_alerts_helper(handle_filters())
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alerts_ctx},
'Contents': alerts_ctx,
'HumanReadable': tableToMarkdown('IntSights Alerts', alerts_HR,
['ID', 'Severity', 'Type', 'FoundDate', 'SourceType', 'SourceURL',
'SourceEmail', 'SourceNetworkType', 'IsClosed', 'IsFlagged', 'Images', 'Tags',
'Description', 'Title', 'TakedownStatus', 'SubType'], removeNull=False),
'ContentsFormat': formats['json']
})
def alert_to_readable(r, parse_tags):
"""
Convert alert to readable format
"""
readable = {
'ID': demisto.get(r, '_id'),
'Severity': demisto.get(r, 'Details.Severity'),
'Type': demisto.get(r, 'Details.Type'),
'FoundDate': demisto.get(r, 'FoundDate'),
'SourceType': demisto.get(r, 'Details.Source.Type'),
'SourceURL': demisto.get(r, 'Details.Source.URL'),
'SourceEmail': demisto.get(r, 'Details.Source.Email'),
'SourceNetworkType': demisto.get(r, 'Details.Source.NetworkType'),
'IsClosed': demisto.get(r, 'IsClosed'),
'IsFlagged': demisto.get(r, 'IsFlagged'),
'Assets': demisto.get(r, 'Assets'),
'Images': demisto.get(r, 'Details.Images'),
'Description': demisto.get(r, 'Details.Description'),
'Title': demisto.get(r, 'Details.Title'),
'TakedownStatus': demisto.get(r, 'TakedownStatus'),
'SubType': demisto.get(r, 'Details.SubType')
}
tags = demisto.get(r, 'Details.Tags')
if parse_tags:
readable['Tags'] = extract_tags(tags)
else:
readable['Tag'] = []
for tag in tags:
readable['Tag'].append({'ID': tag.get('_id'), 'Name': tag.get('Name')})
return readable
def get_alert_by_id_helper(alert_id):
"""
Helper for getting details by ID
"""
r = req('GET', 'public/v1/data/alerts/get-complete-alert/' + alert_id, json_response=True)
return alert_to_readable(r, True), alert_to_readable(r, False)
def get_alert_by_id():
"""
Get alert details by id
"""
alert_id = demisto.getArg('alert-id')
activity_hr, activity_ctx = get_alert_by_id_helper(alert_id)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': activity_ctx},
'Contents': activity_hr,
'HumanReadable': tableToMarkdown('IntSights Alert Details', [activity_hr],
['ID', 'Severity', 'Type', 'FoundDate', 'SourceType', 'SourceURL',
'SourceEmail', 'SourceNetworkType', 'IsClosed', 'IsFlagged', 'Images', 'Tags',
'Description', 'Title', 'TakedownStatus', 'SubType']),
'ContentsFormat': formats['json']
})
def get_alert_image():
"""
Retrieves the alert image by image_id
"""
image_id = demisto.getArg('image-id')
r = req('GET', 'public/v1/data/alerts/alert-image/' + image_id)
demisto.results(fileResult(image_id + '-image.jpeg', r.content))
def ask_analyst():
"""
Send question to an analyst about the requested alert
"""
alert_id = demisto.getArg('alert-id')
question = demisto.getArg('question')
req('POST', 'public/v1/data/alerts/ask-the-analyst/' + alert_id, json_data={'Question': question})
question_details = {'ID': alert_id, 'Question': question}
demisto.results(
{
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': question_details},
'Contents': question_details,
'HumanReadable': tableToMarkdown(
'IntSights Ask the Analyst: Your question has been successfully sent to an analyst about the requested alert',
[question_details], ['ID', 'Question']),
'ContentsFormat': formats['json']
}
)
def get_alert_activity():
"""
Retrieves the alert activity by alert-id
"""
alert_id = demisto.getArg('alert-id')
r = req('GET', 'public/v1/data/alerts/activity-log/' + alert_id, json_response=True)
human_readables = []
alert = {'ID': alert_id, 'Activities': []}
for k in r:
alert['Activities'].append({
'ID': demisto.get(k, '_id'),
'Type': demisto.get(k, 'Type'),
'Initiator': demisto.get(k, 'Initiator'),
'CreatedDate': demisto.get(k, 'CreatedDate'),
'UpdateDate': demisto.get(k, 'UpdateDate'),
'RemediationBlocklistUpdate': demisto.get(k, 'AdditionalInformation.RemediationBlocklistUpdate'),
'AskTheAnalyst': {'Replies': demisto.get(k, 'AdditionalInformation.AskTheAnalyst.Replies')},
'Mail': {'Replies': demisto.get(k, 'AdditionalInformation.Mail.Replies')},
'ReadBy': demisto.get(k, 'ReadBy')
})
human_readables.append({
'ID': demisto.get(k, '_id'),
'Type': demisto.get(k, 'Type'),
'Initiator': demisto.get(k, 'Initiator'),
'CreatedDate': demisto.get(k, 'CreatedDate'),
'UpdateDate': demisto.get(k, 'UpdateDate'),
'RemediationBlocklistUpdate': extract_remediation(
demisto.get(k, 'AdditionalInformation.RemediationBlocklistUpdate')),
'AskTheAnalyst': {'Replies': demisto.get(k, 'AdditionalInformation.AskTheAnalyst.Replies')},
'Mail': extract_mail(demisto.get(k, 'AdditionalInformation.Mail.Replies')),
'ReadBy': demisto.get(k, 'ReadBy')
})
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alert},
'Contents': r,
'HumanReadable': tableToMarkdown('IntSights Alert Activity Log', human_readables,
['ID', 'Type', 'Initiator', 'CreatedDate', 'UpdateDate',
'RemediationBlocklistUpdate', 'AskTheAnalyst', 'Mail', 'ReadBy']),
'ContentsFormat': formats['json']
})
def change_severity():
"""
Change severity of an alert
"""
alert_id = demisto.getArg('alert-id')
severity = demisto.getArg('severity')
req('PATCH', 'public/v1/data/alerts/change-severity/' + alert_id, json_data={'Severity': severity})
severity_details = {'ID': alert_id, 'Severity': severity}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': severity_details},
'Contents': severity_details,
'HumanReadable': tableToMarkdown(
'IntSights Update Alert Severity: The Alert severity has been successfully updated.', [severity_details],
['ID', 'Severity']),
'ContentsFormat': formats['json']
})
def get_assignee_id(assignee_email):
r = req('GET', 'public/v1/account/users-details', json_response=True)
for user in r:
if assignee_email == user.get('Email', ''):
return user.get('_id')
raise Exception('user not found')
def assign_alert():
"""
Assign alert to an Assignee ID
"""
alert_id = demisto.getArg('alert-id')
assignee_email = demisto.getArg('assignee-email')
is_mssp = demisto.getArg('is-mssp-optional')
assignee_id = get_assignee_id(assignee_email)
assign_details = {'ID': alert_id, 'Assignees.AssigneeID': assignee_id}
url = 'public/v1/data/alerts/assign-alert/' + alert_id
if is_mssp:
url += '?IsMssp=' + is_mssp
req('PATCH', url, json_data={'AssigneeID': assignee_id})
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': assign_details},
'Contents': assign_details,
'HumanReadable': tableToMarkdown(
'IntSights Assign Alert: The Alert has been successfully assigned to assigneeID', [assign_details],
['ID', 'Assignees.AssigneeID']),
'ContentsFormat': formats['json']
})
def unassign_alert():
"""
Unassign an alert
"""
alert_id = demisto.getArg('alert-id')
req('PATCH', 'public/v1/data/alerts/unassign-alert/' + alert_id)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id}},
'Contents': {'ID': alert_id},
'HumanReadable': 'Alert id: ' + alert_id + ' successfully unassigned',
'ContentsFormat': formats['json']
})
def close_alert():
"""
Close an alert
"""
alert_id = demisto.getArg('alert-id')
reason = demisto.getArg('reason')
free_text = demisto.getArg('free-text')
is_hidden = demisto.getArg('is-hidden')
rate = demisto.getArg('rate')
close_details = {'ID': alert_id, 'Close Reason': reason, 'Closed FreeText': free_text, 'Closed Rate': rate,
'IsHidden': is_hidden}
close_details_context = {'ID': alert_id, 'Closed': {'Reason': reason, 'FreeText': free_text, 'Rate': rate},
'IsHidden': is_hidden}
url = 'public/v1/data/alerts/close-alert/' + alert_id
json_data = {'Reason': reason}
if free_text:
json_data['FreeText'] = free_text
if is_hidden:
json_data['IsHidden'] = is_hidden
if rate:
json_data['Rate'] = rate
req('PATCH', url, json_data)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': close_details},
'Contents': close_details_context,
'HumanReadable': tableToMarkdown('IntSights Close Alert: The Alert has successfully been closed.',
[close_details],
['ID', 'Close Reason', 'Closed FreeText', 'Closed Rate', 'IsHidden']),
'ContentsFormat': formats['json']
})
def send_mail():
"""
Send email with the alert details and a question
"""
alert_id = demisto.getArg('alert-id')
emails = argToList(demisto.getArg('emails'))
content = demisto.getArg('content')
req('POST', 'public/v1/data/alerts/send-mail/' + alert_id, {'Emails': emails, 'Content': content})
ec = {
'ID': alert_id,
'EmailID': emails,
'Question': content
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': ec},
'Contents': ec,
'HumanReadable': 'Email with content (' + content + ') sent to emails',
'ContentsFormat': formats['json']
})
def get_tag_id(alert_id, tag_name):
res = req('GET', 'public/v1/data/alerts/get-complete-alert/' + alert_id, json_response=True)
details = res.get('Details', {})
tags = details.get('Tags', [])
for tag in tags:
if tag.get('Name', '') == tag_name:
return tag.get('_id', '')
return 'Not found'
def add_tag():
"""
Adds a tag to the alert
"""
alert_id = demisto.getArg('alert-id')
tag_name = demisto.getArg('tag-name')
req('PATCH', 'public/v1/data/alerts/add-tag/' + alert_id, json_data={'TagName': tag_name})
tag_info = {
'TagName': tag_name,
'ID': get_tag_id(alert_id, tag_name)
}
ec = {
'ID': alert_id,
'Tags': tag_info
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': ec},
'Contents': ec,
'HumanReadable': 'Tag (' + tag_name + ') added to alert id: ' + alert_id,
'ContentsFormat': formats['json']
})
def remove_tag():
"""
Removes a tag from an alert
"""
alert_id = demisto.getArg('alert-id')
tag_id = demisto.getArg('tag-id')
req('PATCH', 'public/v1/data/alerts/remove-tag/' + alert_id, json_data={'TagID': tag_id})
ec = {
'ID': alert_id,
'Tags': {'ID': tag_id}
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': ec},
'Contents': ec,
'HumanReadable': 'Tag id: ' + tag_id + ' removed from alert id: ' + alert_id,
'ContentsFormat': formats['json']
})
def add_comment():
"""
Adds a comment to an alert
"""
alert_id = demisto.getArg('alert-id')
comment = demisto.getArg('comment')
req('PATCH', 'public/v1/data/alerts/add-comment/' + alert_id, json_data={'Comment': comment})
ec = {
'ID': alert_id,
'Comment': comment
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': ec},
'Contents': ec,
'HumanReadable': 'Successfully added comment "' + comment + '" to alert id: ' + alert_id,
'ContentsFormat': formats['json']
})
def IOC_to_readable(r):
"""
Convert IOC to readable format
"""
ioc_context = {
'ID': demisto.get(r, '_id'),
'SourceID': demisto.get(r, 'SourceID'),
'AccountID': demisto.get(r, 'AccountID'),
'Type': demisto.get(r, 'Type'),
'Value': demisto.get(r, 'Value'),
'FirstSeen': demisto.get(r, 'FirstSeen'),
'LastSeen': demisto.get(r, 'LastSeen'),
'Domain': demisto.get(r, 'Domain'),
'Status': demisto.get(r, 'Status'),
'Severity': demisto.get(r, 'Severity'),
'SourceName': demisto.get(r, 'Source.Name'),
'SourceConfidence': demisto.get(r, 'Source.Confidence'),
'Flags': {'IsInAlexa': demisto.get(r, 'Flags.IsInAlexa')},
'Enrichment': {
'Status': demisto.get(r, 'Enrichment.Status'),
'Data': demisto.get(r, 'Enrichment.Data'),
'Date': demisto.get(r, 'Enrichment.Data')  # Backwards compatibility issue
}
}
ioc_readable = {
'ID': demisto.get(r, '_id'),
'SourceID': demisto.get(r, 'SourceID'),
'AccountID': demisto.get(r, 'AccountID'),
'Type': demisto.get(r, 'Type'),
'Value': demisto.get(r, 'Value'),
'FirstSeen': demisto.get(r, 'FirstSeen'),
'LastSeen': demisto.get(r, 'LastSeen'),
'Domain': demisto.get(r, 'Domain'),
'Status': demisto.get(r, 'Status'),
'Severity': demisto.get(r, 'Severity').get('Value'),
'SourceName': demisto.get(r, 'Source.Name'),
'SourceConfidence': demisto.get(r, 'Source.Confidence'),
'IsInAlexa': demisto.get(r, 'Flags.IsInAlexa'),
'Enrichment Status': demisto.get(r, 'Enrichment.Status'),
'Enrichment Data': demisto.get(r, 'Enrichment.Data')
}
dbot_score = {
'Indicator': ioc_context['Value'],
'Type': IOC_TYPE_TO_DBOT_TYPE[ioc_context['Type']],
'Vendor': 'IntSights',
'Score': translate_severity(ioc_readable['Severity'])
}
malicious_dict = {
'Vendor': 'IntSights',
'Description': 'IntSights severity level is High'
}
domain = {}
if ioc_context['Domain']:
domain['Name'] = ioc_context['Domain']
if translate_severity(ioc_readable['Severity']) == 3:
domain['Malicious'] = malicious_dict
ip_info = {}
if ioc_context['Type'] == 'IpAddresses':
ip_info['Address'] = ioc_context['Value']
if translate_severity(ioc_readable['Severity']) == 3:
ip_info['Malicious'] = malicious_dict
url_info = {}
if ioc_context['Type'] == 'Urls':
url_info['Data'] = ioc_context['Value']
if translate_severity(ioc_readable['Severity']) == 3:
url_info['Malicious'] = malicious_dict
hash_info = {}
if ioc_context['Type'] == 'Hashes':
hash_info['Name'] = ioc_context['Value']
hash_info[hash_identifier(ioc_context['Value'])] = ioc_context['Value']
if translate_severity(ioc_readable['Severity']) == 3:
hash_info['Malicious'] = malicious_dict
return ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info
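# Illustrative use of the tuple returned above (assumes `raw_ioc` is a single IOC record from the API):
#   ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info = IOC_to_readable(raw_ioc)
# ioc_context feeds IntSights.Iocs, dbot_score feeds DBotScore, and domain/ip_info/url_info/hash_info
# populate the standard Domain/IP/URL/File indicator contexts.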
def search_for_IOC():
"""
Search for IOC by value
"""
r = req('GET', 'public/v1/iocs/ioc-by-value', params=handle_filters(), json_response=True)
if r:
ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info = IOC_to_readable(r)
demisto.results(
{
'Type': entryTypes['note'],
'EntryContext': {
'IntSights.Iocs(val.ID === obj.ID)': ioc_context,
'DBotScore': dbot_score,
'Domain': domain,
'IP': ip_info,
'URL': url_info,
'File': hash_info
},
'Contents': r,
'HumanReadable': tableToMarkdown('IOC Information', [ioc_readable],
['ID', 'SourceID', 'AccountID', 'Type', 'Value', 'FirstSeen',
'LastSeen', 'Domain', 'Status', 'Severity', 'SourceName',
'SourceConfidence', 'IsInAlexa', 'Enrichment Status',
'Enrichment Data']),
'ContentsFormat': formats['json']
}
)
else:
results_for_no_content('IOC Information')
def results_for_no_content(cmd_name):
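    """
    Returns an empty entry with a 'no results' human readable message for the given command name
    """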
demisto.results(
{
'Type': entryTypes['note'],
'EntryContext': {'IntSights': {}},
'Contents': {},
'HumanReadable': '### {} \n\n Could not get any results.'.format(cmd_name),
'ContentsFormat': formats['json']
}
)
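# Severity-to-DBotScore mapping used below: High/Medium -> 3 (malicious), Low -> 2 (suspicious),
# anything else -> 0 (unknown).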
def translate_severity(sev):
"""
Translate alert severity to demisto
"""
if sev == 'High' or sev == 'Medium':
return 3
elif sev == 'Low':
return 2
return 0
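# fetch_incidents below derives its time window from the stored last-run time when available, otherwise
# from the configured fetch_delta parameter, then filters alerts by minimum severity and (optionally) type.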
def fetch_incidents():
"""
Fetch incidents for Demisto
"""
now = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds() * 1000)
lastRunObject = demisto.getLastRun()
    # Fall back to the configured fetch_delta window when there is no stored last-run time.
    if not (lastRunObject and lastRunObject.get('time')):
fetch_delta, _ = parse_date_range(demisto.params().get('fetch_delta', DEFAULT_TIME_RANGE), to_timestamp=True)
else:
fetch_delta = lastRunObject.get('time')
alert_type = demisto.getParam('type')
min_severity_level = demisto.params().get('severity_level', 'All')
if min_severity_level not in SEVERITY_LEVEL:
raise Exception("Minimum Alert severity level to fetch incidents incidents from, allowed values are: ''All'',"
" ''Low'', ''Medium'',''High''(Setting to All will fetch all incidents)")
alerts_HR, alerts_ctx = get_alerts_helper(handle_filters(fetch_delta))
incidents = []
for alert in alerts_ctx:
if SEVERITY_LEVEL[min_severity_level] <= SEVERITY_LEVEL[alert.get('Severity', 'Low')]:
if not alert_type or alert_type.lower() == alert.get('Type', '').lower():
incidents.append({
'name': '{type} - {id}'.format(type=alert.get('Type', 'Type not found'), id=alert.get('ID')),
'occurred': alert.get('FoundDate'),
'severity': translate_severity(alert.get('Severity')),
'rawJSON': json.dumps(alert)
})
demisto.incidents(incidents)
demisto.setLastRun({'time': now})
def get_iocs():
"""
Gets all IOCs with the given filters
"""
r = req('GET', 'public/v1/iocs/complete-iocs-list', params=handle_filters(), json_response=True)
domains = []
ip_infos = []
url_infos = []
hash_infos = []
dbot_scores = []
iocs_context = []
iocs_readable = []
for k in r:
ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info = IOC_to_readable(k)
iocs_context.append(ioc_context)
iocs_readable.append(ioc_readable)
dbot_scores.append(dbot_score)
domains.append(domain)
ip_infos.append(ip_info)
url_infos.append(url_info)
hash_infos.append(hash_info)
demisto.results(
{
'Type': entryTypes['note'],
'EntryContext': {
'IntSights.Iocs': iocs_context,
'DBotScore': dbot_scores,
'Domain': domains,
'IP': ip_infos,
'URL': url_infos,
'File': hash_infos
},
'Contents': r,
'HumanReadable': tableToMarkdown('IOC Information', iocs_readable,
['ID', 'SourceID', 'AccountID', 'Type', 'Value', 'FirstSeen', 'LastSeen',
'Domain', 'Status', 'Severity', 'SourceName', 'SourceConfidence',
'IsInAlexa', 'Enrichment Status', 'Enrichment Data']),
'ContentsFormat': formats['json']
}
)
def takedown_request():
"""
Request alert takedown
"""
alert_id = demisto.getArg('alert-id')
req('PATCH', 'public/v1/data/alerts/takedown-request/' + alert_id)
ec = {
'ID': alert_id,
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': ec},
'Contents': ec,
'HumanReadable': '### IntSights Alert Takedown\n' + 'The Alert Takedown request has been sent successfully for ' + str(
alert_id),
'ContentsFormat': formats['json']
})
def get_alert_takedown_status():
"""
Get an alert's takedown status
"""
alert_id = demisto.getArg('alert-id')
r = req('GET', 'public/v1/data/alerts/takedown-status/' + alert_id)
ec = {
'ID': alert_id,
'TakedownStatus': r.text
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': ec},
'Contents': ec,
'HumanReadable': tableToMarkdown('IntSights Alert Takedown Status', [ec], ['ID', 'TakedownStatus']),
'ContentsFormat': formats['json']
})
def update_ioc_blocklist_status():
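    """
    Updates the blocklist status of IOCs on an alert
    Expects three comma-separated lists of equal length (type, value, blocklist-status), one entry per IOC,
    e.g. (hypothetical values) type="IpAddresses", value="1.2.3.4", blocklist-status="Blocked"
    """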
alert_id = demisto.getArg('alert-id')
types = argToList(demisto.getArg('type'))
values = argToList(demisto.getArg('value'))
statuses = argToList(demisto.getArg('blocklist-status'))
if len(types) != len(values) or len(types) != len(statuses):
return_error('The lists must be of equal length. For each IOC, provide an entry in each list.')
data = []
    for ioc_type, ioc_value, ioc_status in zip(types, values, statuses):
        data.append({
            'Type': ioc_type,
            'Value': ioc_value,
            'BlocklistStatus': ioc_status
        })
req('PATCH', 'public/v1/data/alerts/change-iocs-blocklist-status/' + alert_id, json_data={'Iocs': data})
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id, 'Status': statuses}},
'Contents': {'ID': alert_id, 'Status': statuses},
'HumanReadable': tableToMarkdown('IntSights Update IOC BlockList Status for ' + alert_id, data,
['BlocklistStatus']),
'ContentsFormat': formats['json']
})
def get_ioc_blocklist_status():
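    """
    Gets the blocklist status of the IOCs associated with an alert
    """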
alert_id = demisto.getArg('alert-id')
r = req('GET', 'public/v1/data/alerts/blocklist-status/' + alert_id, json_response=True)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {
'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id, 'Status': [s.get('Status') for s in r]}},
'Contents': r,
'HumanReadable': tableToMarkdown('IntSights Blocklist Status for ' + alert_id, r, ['Status']),
'ContentsFormat': formats['json']
})
def get_mssp_sub_accounts():
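    """
    Lists the MSSP sub accounts together with their used assets
    Temporarily switches the Account-Id header to each sub account to fetch its used-asset counts,
    then restores the original mssp_account_id header
    """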
account_id = demisto.getParam('credentials')['identifier']
accounts = req('GET', 'public/v1/mssp/customers', json_response=True)
if not accounts:
return_error("intsights-mssp-get-sub-accounts failed to return data.")
# Fix accounts _id keys
for account in accounts:
account["ID"] = account["_id"]
del account["_id"]
if len(accounts) < 1:
return_error('Current MSSP Account has no sub accounts.')
account_ids = [i["ID"] for i in accounts]
if mssp_account_id not in account_ids:
demisto.log("[DEBUG] - MSSP sub accounts:" + str(accounts))
return_error('Entered sub account id ({}) is not part of this mssp account'.format(mssp_account_id))
for i, account in enumerate(account_ids):
# Call account
HEADERS['Account-Id'] = account
account_ua = req('GET', 'public/v1/account/used-assets', json_response=True)
if not account_ua:
continue
accounts[i].update(account_ua)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.MsspAccount(val.ID === obj.ID)': accounts},
        'HumanReadable': tableToMarkdown('IntSights MSSP accounts used assets ' + account_id, accounts,
                                         ['ID', 'CompanyName', 'Status', 'AssetsLimit', 'AssetsCount']),
'Contents': accounts,
'ContentsFormat': formats['json']
})
# Restore the header
HEADERS['Account-Id'] = mssp_account_id
def test_module():
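    """
    Tests API connectivity and, when fetching is enabled, validates the configured severity level
    """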
req('GET', 'public/v1/api/version')
if demisto.params().get('isFetch'):
min_severity_level = demisto.params().get('severity_level', 'All')
if min_severity_level not in SEVERITY_LEVEL:
return_error("Minimum Alert severity level to fetch incidents incidents from, allowed values are: "
"''All'', ''Low'', ''Medium'',''High''(Setting to All will fetch all incidents)")
demisto.results('ok')
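# Command dispatcher: route the invoked Demisto command to its handler.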
if demisto.command() == 'test-module':
test_module()
elif demisto.command() == 'intsights-mssp-get-sub-accounts':
get_mssp_sub_accounts()
elif demisto.command() == 'intsights-get-alerts':
get_alerts()
elif demisto.command() == 'intsights-get-alert-image':
get_alert_image()
elif demisto.command() == 'intsights-get-alert-activities':
get_alert_activity()
elif demisto.command() == 'intsights-assign-alert':
assign_alert()
elif demisto.command() == 'intsights-unassign-alert':
unassign_alert()
elif demisto.command() == 'intsights-send-mail':
send_mail()
elif demisto.command() == 'intsights-ask-the-analyst':
ask_analyst()
elif demisto.command() == 'intsights-add-tag-to-alert':
add_tag()
elif demisto.command() == 'intsights-remove-tag-from-alert':
remove_tag()
elif demisto.command() == 'intsights-add-comment-to-alert':
add_comment()
elif demisto.command() == 'intsights-update-alert-severity':
change_severity()
elif demisto.command() == 'intsights-get-alert-by-id':
get_alert_by_id()
elif demisto.command() == 'intsights-get-ioc-by-value':
search_for_IOC()
elif demisto.command() == 'intsights-get-iocs':
get_iocs()
elif demisto.command() == 'intsights-alert-takedown-request':
takedown_request()
elif demisto.command() == 'fetch-incidents':
fetch_incidents()
elif demisto.command() == 'intsights-get-alert-takedown-status':
get_alert_takedown_status()
elif demisto.command() == 'intsights-get-ioc-blocklist-status':
get_ioc_blocklist_status()
elif demisto.command() == 'intsights-update-ioc-blocklist-status':
update_ioc_blocklist_status()
elif demisto.command() == 'intsights-close-alert':
close_alert()
else:
return_error('Unrecognized command: ' + demisto.command())
|
the-stack_0_20447 | # nuScenes dev-kit.
# Code written by Holger Caesar, 2019.
"""
This script converts nuScenes data to KITTI format and KITTI results to nuScenes.
It is used for compatibility with software that uses KITTI-style annotations.
We do not encourage this, as:
- KITTI has only front-facing cameras, whereas nuScenes has a 360 degree horizontal fov.
- KITTI has no radar data.
- The nuScenes database format is more modular.
- KITTI fields like occluded and truncated cannot be exactly reproduced from nuScenes data.
- KITTI has different categories.
Limitations:
- We don't specify the KITTI imu_to_velo_kitti projection in this code base.
- We map nuScenes categories to nuScenes detection categories, rather than KITTI categories.
- Attributes are not part of KITTI and therefore set to '' in the nuScenes result format.
- Velocities are not part of KITTI and therefore set to 0 in the nuScenes result format.
- This script uses the `train` and `val` splits of nuScenes, whereas standard KITTI has `training` and `testing` splits.
This script includes three main functions:
- nuscenes_gt_to_kitti(): Converts nuScenes GT annotations to KITTI format.
- render_kitti(): Render the annotations of the (generated or real) KITTI dataset.
- kitti_res_to_nuscenes(): Converts a KITTI detection result to the nuScenes detection results format.
To launch these scripts run:
- python export_kitti.py nuscenes_gt_to_kitti --nusc_kitti_dir ~/nusc_kitti
- python export_kitti.py render_kitti --nusc_kitti_dir ~/nusc_kitti --render_2d False
- python export_kitti.py kitti_res_to_nuscenes --nusc_kitti_dir ~/nusc_kitti
Note: The parameter --render_2d specifies whether to draw 2d or 3d boxes.
To work with the original KITTI dataset, use these parameters:
--nusc_kitti_dir /data/sets/kitti --split training
See https://www.nuscenes.org/object-detection for more information on the nuScenes result format.
"""
import json
import os
from typing import List, Dict, Any
import fire
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from pyquaternion import Quaternion
from nuscenes_radar_devkit.nuscenes.eval.detection.utils import category_to_detection_name
from nuscenes_radar_devkit.nuscenes import NuScenes
from nuscenes_radar_devkit.nuscenes.utils.data_classes import LidarPointCloud, Box
from nuscenes_radar_devkit.nuscenes.utils.geometry_utils import BoxVisibility, transform_matrix
from nuscenes_radar_devkit.nuscenes.utils.kitti import KittiDB
from nuscenes_radar_devkit.nuscenes.utils.splits import create_splits_logs
class KittiConverter:
def __init__(self,
nusc_kitti_dir: str = '~/nusc_kitti',
cam_name: str = 'CAM_FRONT',
lidar_name: str = 'LIDAR_TOP',
image_count: int = 10,
nusc_version: str = 'v1.0-mini',
split: str = 'mini_train'):
"""
:param nusc_kitti_dir: Where to write the KITTI-style annotations.
:param cam_name: Name of the camera to export. Note that only one camera is allowed in KITTI.
:param lidar_name: Name of the lidar sensor.
:param image_count: Number of images to convert.
:param nusc_version: nuScenes version to use.
:param split: Dataset split to use.
"""
self.nusc_kitti_dir = os.path.expanduser(nusc_kitti_dir)
self.cam_name = cam_name
self.lidar_name = lidar_name
self.image_count = image_count
self.nusc_version = nusc_version
self.split = split
# Create nusc_kitti_dir.
if not os.path.isdir(self.nusc_kitti_dir):
os.makedirs(self.nusc_kitti_dir)
# Select subset of the data to look at.
self.nusc = NuScenes(version=nusc_version)
def nuscenes_gt_to_kitti(self) -> None:
"""
Converts nuScenes GT annotations to KITTI format.
"""
kitti_to_nu_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2)
kitti_to_nu_lidar_inv = kitti_to_nu_lidar.inverse
imsize = (1600, 900)
token_idx = 0 # Start tokens from 0.
# Get assignment of scenes to splits.
split_logs = create_splits_logs(self.split, self.nusc)
# Create output folders.
label_folder = os.path.join(self.nusc_kitti_dir, self.split, 'label_2')
calib_folder = os.path.join(self.nusc_kitti_dir, self.split, 'calib')
image_folder = os.path.join(self.nusc_kitti_dir, self.split, 'image_2')
lidar_folder = os.path.join(self.nusc_kitti_dir, self.split, 'velodyne')
for folder in [label_folder, calib_folder, image_folder, lidar_folder]:
if not os.path.isdir(folder):
os.makedirs(folder)
# Use only the samples from the current split.
sample_tokens = self._split_to_samples(split_logs)
sample_tokens = sample_tokens[:self.image_count]
tokens = []
for sample_token in sample_tokens:
# Get sample data.
sample = self.nusc.get('sample', sample_token)
sample_annotation_tokens = sample['anns']
cam_front_token = sample['data'][self.cam_name]
lidar_token = sample['data'][self.lidar_name]
# Retrieve sensor records.
sd_record_cam = self.nusc.get('sample_data', cam_front_token)
sd_record_lid = self.nusc.get('sample_data', lidar_token)
cs_record_cam = self.nusc.get('calibrated_sensor', sd_record_cam['calibrated_sensor_token'])
cs_record_lid = self.nusc.get('calibrated_sensor', sd_record_lid['calibrated_sensor_token'])
# Combine transformations and convert to KITTI format.
# Note: cam uses same conventions in KITTI and nuScenes.
lid_to_ego = transform_matrix(cs_record_lid['translation'], Quaternion(cs_record_lid['rotation']),
inverse=False)
ego_to_cam = transform_matrix(cs_record_cam['translation'], Quaternion(cs_record_cam['rotation']),
inverse=True)
velo_to_cam = np.dot(ego_to_cam, lid_to_ego)
# Convert from KITTI to nuScenes LIDAR coordinates, where we apply velo_to_cam.
velo_to_cam_kitti = np.dot(velo_to_cam, kitti_to_nu_lidar.transformation_matrix)
# Currently not used.
imu_to_velo_kitti = np.zeros((3, 4)) # Dummy values.
r0_rect = Quaternion(axis=[1, 0, 0], angle=0) # Dummy values.
# Projection matrix.
p_left_kitti = np.zeros((3, 4))
p_left_kitti[:3, :3] = cs_record_cam['camera_intrinsic'] # Cameras are always rectified.
# Create KITTI style transforms.
velo_to_cam_rot = velo_to_cam_kitti[:3, :3]
velo_to_cam_trans = velo_to_cam_kitti[:3, 3]
# Check that the rotation has the same format as in KITTI.
assert (velo_to_cam_rot.round(0) == np.array([[0, -1, 0], [0, 0, -1], [1, 0, 0]])).all()
assert (velo_to_cam_trans[1:3] < 0).all()
# Retrieve the token from the lidar.
# Note that this may be confusing as the filename of the camera will include the timestamp of the lidar,
# not the camera.
filename_cam_full = sd_record_cam['filename']
filename_lid_full = sd_record_lid['filename']
# token = '%06d' % token_idx # Alternative to use KITTI names.
token_idx += 1
# Convert image (jpg to png).
src_im_path = os.path.join(self.nusc.dataroot, filename_cam_full)
dst_im_path = os.path.join(image_folder, sample_token + '.png')
if not os.path.exists(dst_im_path):
im = Image.open(src_im_path)
im.save(dst_im_path, "PNG")
# Convert lidar.
# Note that we are only using a single sweep, instead of the commonly used n sweeps.
src_lid_path = os.path.join(self.nusc.dataroot, filename_lid_full)
dst_lid_path = os.path.join(lidar_folder, sample_token + '.bin')
assert not dst_lid_path.endswith('.pcd.bin')
pcl = LidarPointCloud.from_file(src_lid_path)
pcl.rotate(kitti_to_nu_lidar_inv.rotation_matrix) # In KITTI lidar frame.
with open(dst_lid_path, "w") as lid_file:
pcl.points.T.tofile(lid_file)
# Add to tokens.
tokens.append(sample_token)
# Create calibration file.
kitti_transforms = dict()
kitti_transforms['P0'] = np.zeros((3, 4)) # Dummy values.
kitti_transforms['P1'] = np.zeros((3, 4)) # Dummy values.
kitti_transforms['P2'] = p_left_kitti # Left camera transform.
kitti_transforms['P3'] = np.zeros((3, 4)) # Dummy values.
kitti_transforms['R0_rect'] = r0_rect.rotation_matrix # Cameras are already rectified.
kitti_transforms['Tr_velo_to_cam'] = np.hstack((velo_to_cam_rot, velo_to_cam_trans.reshape(3, 1)))
kitti_transforms['Tr_imu_to_velo'] = imu_to_velo_kitti
calib_path = os.path.join(calib_folder, sample_token + '.txt')
with open(calib_path, "w") as calib_file:
for (key, val) in kitti_transforms.items():
val = val.flatten()
val_str = '%.12e' % val[0]
for v in val[1:]:
val_str += ' %.12e' % v
calib_file.write('%s: %s\n' % (key, val_str))
# Write label file.
label_path = os.path.join(label_folder, sample_token + '.txt')
if os.path.exists(label_path):
print('Skipping existing file: %s' % label_path)
continue
else:
print('Writing file: %s' % label_path)
with open(label_path, "w") as label_file:
for sample_annotation_token in sample_annotation_tokens:
sample_annotation = self.nusc.get('sample_annotation', sample_annotation_token)
# Get box in LIDAR frame.
_, box_lidar_nusc, _ = self.nusc.get_sample_data(lidar_token, box_vis_level=BoxVisibility.NONE,
selected_anntokens=[sample_annotation_token])
box_lidar_nusc = box_lidar_nusc[0]
# Truncated: Set all objects to 0 which means untruncated.
truncated = 0.0
# Occluded: Set all objects to full visibility as this information is not available in nuScenes.
occluded = 0
# Convert nuScenes category to nuScenes detection challenge category.
detection_name = category_to_detection_name(sample_annotation['category_name'])
# Skip categories that are not part of the nuScenes detection challenge.
if detection_name is None:
continue
# Convert from nuScenes to KITTI box format.
box_cam_kitti = KittiDB.box_nuscenes_to_kitti(
box_lidar_nusc, Quaternion(matrix=velo_to_cam_rot), velo_to_cam_trans, r0_rect)
# Project 3d box to 2d box in image, ignore box if it does not fall inside.
bbox_2d = KittiDB.project_kitti_box_to_image(box_cam_kitti, p_left_kitti, imsize=imsize)
if bbox_2d is None:
continue
# Set dummy score so we can use this file as result.
box_cam_kitti.score = 0
# Convert box to output string format.
output = KittiDB.box_to_string(name=detection_name, box=box_cam_kitti, bbox_2d=bbox_2d,
truncation=truncated, occlusion=occluded)
# Write to disk.
label_file.write(output + '\n')
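    # The conversion above writes, per split: label_2/<token>.txt, calib/<token>.txt, image_2/<token>.png
    # and velodyne/<token>.bin, mirroring the standard KITTI folder layout.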
def render_kitti(self, render_2d: bool) -> None:
"""
Renders the annotations in the KITTI dataset from a lidar and a camera view.
:param render_2d: Whether to render 2d boxes (only works for camera data).
"""
if render_2d:
print('Rendering 2d boxes from KITTI format')
else:
print('Rendering 3d boxes projected from 3d KITTI format')
# Load the KITTI dataset.
kitti = KittiDB(root=self.nusc_kitti_dir, splits=(self.split,))
# Create output folder.
render_dir = os.path.join(self.nusc_kitti_dir, 'render')
if not os.path.isdir(render_dir):
os.mkdir(render_dir)
# Render each image.
for token in kitti.tokens[:self.image_count]:
for sensor in ['lidar', 'camera']:
out_path = os.path.join(render_dir, '%s_%s.png' % (token, sensor))
print('Rendering file to disk: %s' % out_path)
kitti.render_sample_data(token, sensor_modality=sensor, out_path=out_path, render_2d=render_2d)
plt.close() # Close the windows to avoid a warning of too many open windows.
def kitti_res_to_nuscenes(self, meta: Dict[str, bool] = None) -> None:
"""
Converts a KITTI detection result to the nuScenes detection results format.
:param meta: Meta data describing the method used to generate the result. See nuscenes.org/object-detection.
"""
# Dummy meta data, please adjust accordingly.
if meta is None:
meta = {
'use_camera': False,
'use_lidar': True,
'use_radar': False,
'use_map': False,
'use_external': False,
}
# Init.
results = {}
# Load the KITTI dataset.
kitti = KittiDB(root=self.nusc_kitti_dir, splits=(self.split, ))
# Get assignment of scenes to splits.
split_logs = create_splits_logs(self.split, self.nusc)
# Use only the samples from the current split.
sample_tokens = self._split_to_samples(split_logs)
sample_tokens = sample_tokens[:self.image_count]
for sample_token in sample_tokens:
# Get the KITTI boxes we just generated in LIDAR frame.
kitti_token = '%s_%s' % (self.split, sample_token)
boxes = kitti.get_boxes(token=kitti_token)
# Convert KITTI boxes to nuScenes detection challenge result format.
sample_results = [self._box_to_sample_result(sample_token, box) for box in boxes]
# Store all results for this image.
results[sample_token] = sample_results
# Store submission file to disk.
submission = {
'meta': meta,
'results': results
}
submission_path = os.path.join(self.nusc_kitti_dir, 'submission.json')
print('Writing submission to: %s' % submission_path)
with open(submission_path, 'w') as f:
json.dump(submission, f, indent=2)
def _box_to_sample_result(self, sample_token: str, box: Box, attribute_name: str = '') -> Dict[str, Any]:
# Prepare data
translation = box.center
size = box.wlh
rotation = box.orientation.q
velocity = box.velocity
detection_name = box.name
detection_score = box.score
# Create result dict
sample_result = dict()
sample_result['sample_token'] = sample_token
sample_result['translation'] = translation.tolist()
sample_result['size'] = size.tolist()
sample_result['rotation'] = rotation.tolist()
sample_result['velocity'] = velocity.tolist()[:2] # Only need vx, vy.
sample_result['detection_name'] = detection_name
sample_result['detection_score'] = detection_score
sample_result['attribute_name'] = attribute_name
return sample_result
def _split_to_samples(self, split_logs: List[str]) -> List[str]:
"""
Convenience function to get the samples in a particular split.
:param split_logs: A list of the log names in this split.
:return: The list of samples.
"""
samples = []
for sample in self.nusc.sample:
scene = self.nusc.get('scene', sample['scene_token'])
log = self.nusc.get('log', scene['log_token'])
logfile = log['logfile']
if logfile in split_logs:
samples.append(sample['token'])
return samples
if __name__ == '__main__':
fire.Fire(KittiConverter)
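# Example invocations via python-fire (paths are placeholders; constructor arguments become CLI flags):
#   python export_kitti.py nuscenes_gt_to_kitti --nusc_kitti_dir ~/nusc_kitti --nusc_version v1.0-mini --split mini_train
#   python export_kitti.py render_kitti --nusc_kitti_dir ~/nusc_kitti --render_2d False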
|
the-stack_0_20449 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class BuServiceLevel(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
BuServiceLevel - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'include': 'bool',
'percent': 'int',
'seconds': 'int'
}
self.attribute_map = {
'include': 'include',
'percent': 'percent',
'seconds': 'seconds'
}
self._include = None
self._percent = None
self._seconds = None
@property
def include(self):
"""
Gets the include of this BuServiceLevel.
Whether to include service level targets in the associated configuration
:return: The include of this BuServiceLevel.
:rtype: bool
"""
return self._include
@include.setter
def include(self, include):
"""
Sets the include of this BuServiceLevel.
Whether to include service level targets in the associated configuration
:param include: The include of this BuServiceLevel.
:type: bool
"""
self._include = include
@property
def percent(self):
"""
Gets the percent of this BuServiceLevel.
Service level target percent answered. Required if include == true
:return: The percent of this BuServiceLevel.
:rtype: int
"""
return self._percent
@percent.setter
def percent(self, percent):
"""
Sets the percent of this BuServiceLevel.
Service level target percent answered. Required if include == true
:param percent: The percent of this BuServiceLevel.
:type: int
"""
self._percent = percent
@property
def seconds(self):
"""
Gets the seconds of this BuServiceLevel.
Service level target answer time. Required if include == true
:return: The seconds of this BuServiceLevel.
:rtype: int
"""
return self._seconds
@seconds.setter
def seconds(self, seconds):
"""
Sets the seconds of this BuServiceLevel.
Service level target answer time. Required if include == true
:param seconds: The seconds of this BuServiceLevel.
:type: int
"""
self._seconds = seconds
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
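# Illustrative usage (sketch; attribute values are arbitrary examples):
#   level = BuServiceLevel()
#   level.include = True
#   level.percent = 80   # answer 80% of interactions...
#   level.seconds = 20   # ...within 20 seconds
#   print(level.to_str())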