ext | sha | content |
---|---|---|
py | b406339cb3f5b134ee392d7c55282e015116bc25 | # This file is part of Radicale Server - Calendar Server
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2019 Unrud <[email protected]>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
import configparser
import re
from radicale import pathutils, rights
from radicale.log import logger
class Rights(rights.BaseRights):
def __init__(self, configuration):
super().__init__(configuration)
self.filename = configuration.get("rights", "file")
def authorized(self, user, path, permissions):
user = user or ""
sane_path = pathutils.strip_path(path)
# Prevent "regex injection"
user_escaped = re.escape(user)
sane_path_escaped = re.escape(sane_path)
rights_config = configparser.ConfigParser({"login": user_escaped, "path": sane_path_escaped})
try:
if not rights_config.read(self.filename):
raise j.exceptions.Base("No such file: %r" % self.filename)
except Exception as e:
raise j.exceptions.Base("Failed to load rights file %r: %s" % (self.filename, e)) from e
for section in rights_config.sections():
try:
user_pattern = rights_config.get(section, "user")
collection_pattern = rights_config.get(section, "collection")
user_match = re.fullmatch(user_pattern, user)
collection_match = user_match and re.fullmatch(
collection_pattern.format(*map(re.escape, user_match.groups())), sane_path
)
except Exception as e:
raise RuntimeError(
"Error in section %r of rights file %r: %s" % (section, self.filename, e)
) from e
if user_match and collection_match:
logger.debug(
"Rule %r:%r matches %r:%r from section %r",
user,
sane_path,
user_pattern,
collection_pattern,
section,
)
return rights.intersect_permissions(permissions, rights_config.get(section, "permissions"))
else:
logger.debug(
"Rule %r:%r doesn't match %r:%r from section %r",
user,
sane_path,
user_pattern,
collection_pattern,
section,
)
logger.info("Rights: %r:%r doesn't match any section", user, sane_path)
return ""
|
py | b40635dd3420afb9476ac979afaa29c949c767a5 | from django.contrib import admin
from .models import Prediction
admin.site.register(Prediction)
|
py | b40635e60ca74428eeec2453e8c2fe2b3171d5f9 | try:
import unittest2 as unittest
except ImportError:
import unittest
from rope.base.oi import objectdb, memorydb
from ropetest import testutils
def _do_for_all_dbs(function):
def called(self):
for db in self.dbs:
function(self, db)
return called
class _MockValidation(object):
def is_value_valid(self, value):
return value != -1
def is_more_valid(self, new, old):
return new != -1
def is_file_valid(self, path):
return path != 'invalid'
def is_scope_valid(self, path, key):
return path != 'invalid' and key != 'invalid'
class _MockFileListObserver(object):
log = ''
def added(self, path):
self.log += 'added %s ' % path
def removed(self, path):
self.log += 'removed %s ' % path
class ObjectDBTest(unittest.TestCase):
def setUp(self):
super(ObjectDBTest, self).setUp()
self.project = testutils.sample_project()
validation = _MockValidation()
self.dbs = [
objectdb.ObjectDB(memorydb.MemoryDB(self.project), validation)]
def tearDown(self):
for db in self.dbs:
db.write()
testutils.remove_project(self.project)
super(ObjectDBTest, self).tearDown()
@_do_for_all_dbs
def test_simple_per_name(self, db):
db.add_pername('file', 'key', 'name', 1)
self.assertEqual(1, db.get_pername('file', 'key', 'name'))
@_do_for_all_dbs
def test_simple_per_name_does_not_exist(self, db):
self.assertEquals(None, db.get_pername('file', 'key', 'name'))
@_do_for_all_dbs
def test_simple_per_name_after_syncing(self, db):
db.add_pername('file', 'key', 'name', 1)
db.write()
self.assertEquals(1, db.get_pername('file', 'key', 'name'))
@_do_for_all_dbs
def test_getting_returned(self, db):
db.add_callinfo('file', 'key', (1, 2), 3)
self.assertEquals(3, db.get_returned('file', 'key', (1, 2)))
@_do_for_all_dbs
def test_getting_returned_when_does_not_match(self, db):
db.add_callinfo('file', 'key', (1, 2), 3)
self.assertEquals(None, db.get_returned('file', 'key', (1, 1)))
@_do_for_all_dbs
def test_getting_call_info(self, db):
db.add_callinfo('file', 'key', (1, 2), 3)
call_infos = list(db.get_callinfos('file', 'key'))
self.assertEquals(1, len(call_infos))
self.assertEquals((1, 2), call_infos[0].get_parameters())
self.assertEquals(3, call_infos[0].get_returned())
@_do_for_all_dbs
def test_invalid_per_name(self, db):
db.add_pername('file', 'key', 'name', -1)
self.assertEquals(None, db.get_pername('file', 'key', 'name'))
@_do_for_all_dbs
def test_overwriting_per_name(self, db):
db.add_pername('file', 'key', 'name', 1)
db.add_pername('file', 'key', 'name', 2)
self.assertEquals(2, db.get_pername('file', 'key', 'name'))
@_do_for_all_dbs
def test_not_overwriting_with_invalid_per_name(self, db):
db.add_pername('file', 'key', 'name', 1)
db.add_pername('file', 'key', 'name', -1)
self.assertEquals(1, db.get_pername('file', 'key', 'name'))
@_do_for_all_dbs
def test_getting_invalid_returned(self, db):
db.add_callinfo('file', 'key', (1, 2), -1)
self.assertEquals(None, db.get_returned('file', 'key', (1, 2)))
@_do_for_all_dbs
def test_not_overwriting_with_invalid_returned(self, db):
db.add_callinfo('file', 'key', (1, 2), 3)
db.add_callinfo('file', 'key', (1, 2), -1)
self.assertEquals(3, db.get_returned('file', 'key', (1, 2)))
@_do_for_all_dbs
def test_get_files(self, db):
db.add_callinfo('file1', 'key', (1, 2), 3)
db.add_callinfo('file2', 'key', (1, 2), 3)
self.assertEquals(set(['file1', 'file2']), set(db.get_files()))
@_do_for_all_dbs
def test_validating_files(self, db):
db.add_callinfo('invalid', 'key', (1, 2), 3)
db.validate_files()
self.assertEquals(0, len(db.get_files()))
@_do_for_all_dbs
def test_validating_file_for_scopes(self, db):
db.add_callinfo('file', 'invalid', (1, 2), 3)
db.validate_file('file')
self.assertEquals(1, len(db.get_files()))
self.assertEquals(0, len(list(db.get_callinfos('file', 'invalid'))))
@_do_for_all_dbs
def test_validating_file_moved(self, db):
db.add_callinfo('file', 'key', (1, 2), 3)
db.file_moved('file', 'newfile')
self.assertEquals(1, len(db.get_files()))
self.assertEquals(1, len(list(db.get_callinfos('newfile', 'key'))))
@_do_for_all_dbs
def test_using_file_list_observer(self, db):
db.add_callinfo('invalid', 'key', (1, 2), 3)
observer = _MockFileListObserver()
db.add_file_list_observer(observer)
db.validate_files()
self.assertEquals('removed invalid ', observer.log)
def suite():
result = unittest.TestSuite()
result.addTests(unittest.makeSuite(ObjectDBTest))
return result
if __name__ == '__main__':
unittest.main()
|
py | b40635e784f7c6a1e956a9b8334c1b60c4b66407 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from blockstore import BlockStore, TxStore
from util import p2p_port
'''
This is a tool for comparing two or more roicoinds to each other
using a script provided.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
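# Illustrative usage: wait_until(lambda: node.closed, timeout=10) polls the
# predicate under mininode_lock every 50 ms and returns False if it never
# becomes true within roughly 10 seconds.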
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.create_callback_map()
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of [obj, True/False/None]:
# - obj is either a CBlock or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance(object):
def __init__(self, objects=[], sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
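# Illustrative sketch of a test generator feeding TestManager (the block/tx
# builder helpers below are hypothetical placeholders, not part of this file):
#
#   class ExampleTestCase(object):
#       def get_tests(self):
#           block = build_next_block()      # a CBlock we expect to be accepted
#           bad_tx = build_invalid_tx()     # a CTransaction we expect rejected
#           # Each entry pairs an object with its expected outcome; None means
#           # "no expectation, just compare the nodes against each other".
#           yield TestInstance([[block, True], [bad_tx, False]])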
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks):
# print [ c.cb.block_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested block")
# --> Answer request (we did this inline!)
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
# print [ c.cb.tx_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested transaction")
# --> Answer request (we did this inline!)
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
# print c.cb.bestblockhash, blockhash, outcome
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
# print c.rpc.getrawmempool()
return False
elif ((txhash in c.cb.lastInv) != outcome):
# print c.rpc.getrawmempool(), c.cb.lastInv
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome ] = [ None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for b_or_t, outcome in test_instance.blocks_and_transactions:
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
# Add to shared block_store, set as current block
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
if (not self.check_results(block.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256,
len(test_instance.blocks_and_transactions))
if (not self.check_results(block.sha256, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
|
py | b40636c1308b5703bf5e2cec52a7a7660c4b5fa8 | # Generated by Django 3.1.3 on 2020-12-04 03:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('housing', '0003_auto_20201203_2159'),
]
operations = [
migrations.AlterField(
model_name='rooms',
name='floor',
field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='housing.floors'),
),
]
|
py | b40636f39dba8e6a74f6a331ab1b27a0b72a11e0 | import itertools
import os
import re
import time
import random
import json
import numpy as np
import pandas as pd
import tensorflow as tf
from scipy import spatial
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
import pymorphy2
from nltk.corpus import wordnet
from utils import load_embeddings, get_now, annotate
from utils_d.ml_models import CnnLstm, train_gbm
def score_khodak(path):
"""
unfinished
"""
with open("other_works/pawn/ru_matches.txt") as f:
r = f.read()
# split by POS
r = r.split("#")
# leave only NOUNS
r = r[1]
r = r.split("\n")
r = [l.split("\t") for l in r]
wordnet_list = list()
key = None
for l in r:
if not l[0]:
continue
elif ":" in l[0]:
key = l[1]
else:
wordnet_list.append([key] + l)
khodak = pd.DataFrame(
wordnet_list, columns=["ru", "score", "synset", "definition"])
my_df = pd.read_csv("annotations/wordnet3_ru_without_duplicates.csv")
# t_p = 0
# f_p = 0
# t_n = 0
# f_n = 0
for index, row in khodak.iterrows():
if row["ru"] in my_df["class10_ru"]:
print(row["ru"])
else:
print(row["ru"])
pass
def get_ru_relations(
w0_i, word_0, gbm, word_matrix, allowed_words,
emb_norm, parts_of_speech,
pos_filter=False):
print("\t", w0_i, word_0, end="\r")
emb_0 = word_matrix[w0_i]
# distances = 1 - spatial.distance.cosine(emb_0, emb_1)
if pos_filter:
pos = parts_of_speech[w0_i]
pos_mask = parts_of_speech == pos
else:
pos_mask = [i for i in range(len(allowed_words))]
pos_matrix = word_matrix[pos_mask]
pos_words = allowed_words[pos_mask]
distances = np.matmul(emb_0, pos_matrix.T)
norm = emb_norm[w0_i] * emb_norm[pos_mask].T
distances = distances.T / norm
distances = distances[0]
embs_1 = [list(emb_0) + list(word_matrix[j]) + [distances[j]]
for j in range(len(pos_words))]
embs_1 = np.array(embs_1)
preds = gbm.predict(embs_1)
max_preds = np.argmax(preds, axis=1)
scores = preds[np.arange(len(preds)), max_preds]
word_hypernyms = []
word_synonyms = []
for s_i, s in enumerate(scores):
if s >= 0.5 and max_preds[s_i] in (0, 1, 3):
pred = max_preds[s_i]
if pred == 0:
word_1 = pos_words[s_i]
word_hypernyms.append((word_0, word_1, s))
print("hyp", word_0, word_1, s, pred)
elif pred == 1:
word_1 = pos_words[s_i]
word_synonyms.append((word_0, word_1, s))
print("syn", word_0, word_1, s, pred)
# reverse hypernyms
elif pred == 3:
word_1 = pos_words[s_i]
word_hypernyms.append((word_1, word_0, s))
print("hyp", word_1, word_0, s, pred)
return word_hypernyms, word_synonyms
def april_new():
lang = "en"
# bad wording; but if False - then only
# hypernym: hyponym
# hypernym: random_word
# wordnet_langs = ["fin", "pol"]
# wordnet.synsets("kod_pocztowy", lang="pol")[0].hypernyms()
parse_synonyms = True
zaliznak_filter = True
pos_filter = False
nouns_only = False
morphy_filter = True
now = get_now()
folder = f"models/{now}"
if nouns_only:
folder += "_nouns"
os.mkdir(folder)
eng_emb = load_embeddings(lang)
# analyse_collocations(eng_emb)
# 117659 words
eng_words = {w for w in wordnet.all_synsets()}
# 87943
# eng_words = {w for w in eng_words if w.hyponyms() or w.hypernyms()}
# 43647 synsets; 24099 words -> 63314
eng_words = {w for w in eng_words if
w.name().split(".")[0] in eng_emb.vocab}
pos_dataset = {(w.pos(), w.name().split(".")[0]) for w in eng_words}
pos_df = pd.DataFrame(pos_dataset, columns=["y", "x"])
pos_x = [eng_emb[w] for w in pos_df["x"].values]
pos_x = np.array(pos_x)
pos_y = pos_df["y"].astype("category")
pos_y = pos_y.cat.codes.values
pos_x_train, pos_x_test, pos_y_train, pos_y_test = train_test_split(
pos_x, pos_y,
test_size=0.2, random_state=42, stratify=pos_df["y"])
pos_gbm = train_gbm(pos_x_train, pos_x_test, pos_y_train, pos_y_test,
f"{folder}/pos_")
# eng_words = {w for w in eng_words if w.name().split(".")[0]}
# sets are slow for random.sample
sample_words = list({w.name().split(".")[0] for w in eng_words})
wordnet_dict = dict()
x = []
# dataset consists of:
# Class 0: hypernym hyponym
# Class 1: hyponym hyponym
# Class 2: hypernym random_word
# hyponym random_word
# Class 3: hyponym hypernym
# all of the classes above are +/- balanced
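# e.g. ("animal", "dog", 0), ("dog", "cat", 1), ("dog", "quasar", 2) and
# ("dog", "animal", 3) -- illustrative rows only, not taken from WordNet output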
for e_i, e in enumerate(eng_words):
print("\t", e_i, end="\r")
hyponyms = e.hyponyms()
hypernyms = e.hypernyms()
e = e.name().split(".")[0]
if hyponyms:
hyponyms = {h.name().split(".")[0] for h in hyponyms}
hyponyms = {h for h in hyponyms if h in eng_emb}
if e not in wordnet_dict:
wordnet_dict[e] = hyponyms
else:
wordnet_dict[e].update(hyponyms)
for h in hyponyms:
# hypernym hyponym
x.append((e, h, 0))
if parse_synonyms:
# hyponym hypernym
x.append((h, e, 3))
if parse_synonyms:
# hyponym hyponym
combinations = create_syn_combinations(hyponyms)
x += combinations
if hypernyms:
hypernyms = {h.name().split(".")[0] for h in hypernyms}
hypernyms = {h for h in hypernyms if h in eng_emb}
for h in hypernyms:
# hypernym hyponym
x.append((h, e, 0))
if parse_synonyms:
# hyponym hypernym
x.append((e, h, 3))
if h not in wordnet_dict:
wordnet_dict[h] = {e}
else:
wordnet_dict[h].add(e)
# hyponym hyponym
if parse_synonyms:
combinations = create_syn_combinations(hypernyms)
x += combinations
x = set(x)
# add some random words to the algorithm
for e_i, e in enumerate(eng_words):
print("\t", e_i, end="\r")
related = {w.name().split(".")[0] for w in e.hypernyms()}
related.update({w.name().split(".")[0] for w in e.hyponyms()})
e = e.name().split(".")[0]
word = random.choice(sample_words)
if word not in related:
if e_i % 2 == 0:
x.add((e, word, 2))
else:
x.add((word, e, 2))
df = pd.DataFrame(x, columns=[1, 2, "target"])
df = df[df[1] != df[2]]
df.groupby("target").count()[1]
# 20378 words
all_words = set(df[1].values).union(set(df[2].values))
# transform words into their ids
all_words = list(all_words)
word_index = {w: i for i, w in enumerate(all_words)}
word_matrix = [eng_emb[w] for w in all_words]
word_matrix = np.array(word_matrix)
with open(f'{folder}/word_index_syn{parse_synonyms}.json', 'w') as outfile:
json.dump(word_index, outfile)
wordnet_dict = {word_index[k]: {word_index[s] for s in v}
for k, v in wordnet_dict.items()}
with open(f'{folder}/wordnet_dict_syn{parse_synonyms}.json', 'w')\
as outfile:
json.dump({k: list(v) for k, v in wordnet_dict.items()}, outfile)
# words to their embeddings
x = df[[1, 2]].values
y = df["target"].values
x = [[word_index[w] for w in row] for row in x]
x = [[word_matrix[t] for t in row] for row in x]
cosine = [1 - spatial.distance.cosine(r[0], r[1]) for r in x]
x = [list(t[0]) + list(t[1]) + [cosine[t_i]] for t_i, t in enumerate(x)]
x = np.array(x)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.1, random_state=42)
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=0.1, random_state=42)
# cnn_model = train_cnn(
# all_words, eng_emb, x_train, x_test, y_train, y_test, word_matrix)
gbm = train_gbm(x_train, x_val, y_train, y_val, f"{folder}/main")
ru_emb = load_embeddings("ru")
# 200000 words
allowed_words = list(ru_emb.vocab)
# 448
ru_collocations = [w for w in allowed_words if "_" in w]
# 297
ru_collocations = [w for w in ru_collocations if
len([l for l in w.split("_") if l]) > 1]
# 146
ru_collocations = [w for w in ru_collocations
if not re.fullmatch("[a-zA-Z_]+", w)]
# 108
ru_collocations = [w for w in ru_collocations
if re.fullmatch("[ёЁа-яА-Я_-]+", w)]
# leave only words from opencorpora.org
# 139674 words; open corpora and ru_emb preprocessings are not compatible
# allowed_words = [w for w in allowed_words if
# morph.word_is_known(w) or any(s in w for s in "_-")]
# allowed_words = [w.replace("ё", "е") for w in allowed_words]
# 163569 words
allowed_words = [w for w in allowed_words
if re.fullmatch("[а-яА-Я]+[а-яА-Я_]+[а-яА-Я]+", w)]
allowed_words = [w for w in allowed_words if len(w) > 3]
if zaliznak_filter:
with open("zaliznak.txt") as f:
zaliznak = f.readlines()
zaliznak = [l.strip() for l in zaliznak]
allowed_words = [w for w in allowed_words
if w in zaliznak or "_" in w]
if morphy_filter or nouns_only:
morph = pymorphy2.MorphAnalyzer()
if morphy_filter:
morphed = [morph.parse(w)[0] for w in allowed_words]
# Sgtm - singularia tantum; geox - geographical
bad_tags = {"COMP", "Name", "plur", "Geox",
"NPRO", # местоимение-существительное
"PREP",
"CONJ",
"PRCL",
"INTJ",
# non-nominative cases
"gent", "datv", "accs", "ablt", "loct",
"voct", "gen1", "gen2",
"acc2", "loc1", "loc2",
# names
"Surn", "Patr", "Orgn", "Trad",
# verb grammar
"past", "futr", "impr", "incl", "excl", "pssv"
}
allowed_words = [w for w_i, w in enumerate(allowed_words)
if not any(t in morphed[w_i].tag for t in bad_tags)]
if nouns_only:
allowed_words = [w for w_i, w in enumerate(allowed_words)
if "NOUN" in morph.parse(w)[0].tag]
# and
# all(t in morphed[w_i].tag for t in good_tags)]
# allowed_words = [w for w in allowed_words if len(w) < 17 or "_" in w]
word_matrix = np.array([ru_emb[w] for w in allowed_words])
allowed_words = np.array(allowed_words)
emb_norm = np.linalg.norm(word_matrix, axis=1)[np.newaxis].T
parts_of_speech = np.argmax(pos_gbm.predict(word_matrix), axis=1)
ru_synonyms = []
ru_hypernyms = []
# irange = [(w0_i, word_0, gbm, word_matrix, allowed_words, emb_norm)
# for w0_i, word_0 in enumerate(allowed_words)]
# pool = mp.pool.ThreadPool(4)
for w0_i, word_0 in enumerate(allowed_words):
word_hypernyms, word_synonyms = get_ru_relations(
w0_i, word_0, gbm, word_matrix, allowed_words, emb_norm,
parts_of_speech, pos_filter=pos_filter)
ru_hypernyms += word_hypernyms
ru_synonyms += word_synonyms
print("allowed words", len(allowed_words))
time.sleep(10)
for filename, file in [(f"{folder}/synonyms", ru_synonyms),
(f"{folder}/hypernyms", ru_hypernyms)]:
with open("{}_{}_{}".format(filename,
len(allowed_words),
now),
"a") as f:
for line_i, line in enumerate(file):
f.write("\t".join([str(w) for w in line]) + "\n")
# pickle.dump(ru_synonyms, open("ru_synonyms_zaliznak.pcl", "wb"))
# pickle.dump(ru_hypernyms, open("ru_hypernyms_zaliznak.pcl", "wb"))
# In [27]: ru_wordnet = dict()
# ...: for r in ru_hypernyms:
# ...: k, v = r
# ...: if k not in ru_wordnet:
# ...: ru_wordnet[k] = set([v])
# ...: else:
# ...: ru_wordnet[k].add(v)
# for w1_i, word_1 in enumerate(allowed_words):
# if w0_i == w1_i:
# continue
# # The function is not symmetric
# # if w1_i > w0_i:
# # continue
# emb_1 = word_matrix[w1_i]
# dist = distances[word_1]
# gbm_input = np.array(list(emb_0) + list(emb_1) + [dist])
# pred = gbm.predict([gbm_input])[0]
# max_pred = np.argmax(pred)
# score = pred[max_pred]
# pred = max_pred
# if score < 0.9:
# continue
return None
def train_cnn(
all_words, eng_emb, x_train, x_test, y_train, y_test, word_matrix):
kwargs = {
"voc_size": len(all_words) - 1,
"sequence_len": 2,
"vec_len": eng_emb.vector_size,
"categ_nums": [3],
"name": "w2v/",
"use_generator": False,
"x_train": x_train,
"x_test": x_test,
"y_train": y_train,
"y_test": y_test,
"embedding_matrix": word_matrix,
"layers_multiplier": 1,
# "kernel_size": 1,
# "pool_size": 1,
"trainable_embeddings": False,
"use_embeddings": False
}
cnn = CnnLstm(**kwargs)
model = cnn.cnn_lstm_classification()
return model
def tf_model(x_train, y_train, x_test, y_test):
"""Model function for CNN."""
# Input Layer; concatenated embeddings
with tf.Session() as sess:
batch_size = 128
emb = tf.placeholder(shape=(None, 600), name='emb', dtype=tf.float32)
dense_0 = tf.layers.dense(emb, 512, activation='relu')
dense_1 = tf.layers.dense(dense_0, 256, activation='relu')
dense_2 = tf.layers.dense(dense_1, 3, activation='relu')
labels = tf.placeholder(shape=(None), name='labels', dtype=tf.int64)
output = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=dense_2)
cost = tf.reduce_mean(output)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)
accuracy, update_op = tf.metrics.accuracy(
# use the 3-way logits layer (dense_2) for predictions; dense_1 is a hidden layer
labels=labels, predictions=tf.argmax(dense_2, 1),
name='accuracy')
patience = 5
train_loss_results = []
train_accuracy_results = []
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
for epoch in range(1000):
epoch_loss = 0
epoch_acc = 0
epoch_range = int(len(x_train) / batch_size) + 1
for i in range(epoch_range):
batch_start = batch_size * i
batch_end = batch_start + batch_size
epoch_x = x_train[batch_start: batch_end]
epoch_y = y_train[batch_start: batch_end]
batch_opt, batch_cost, batch_acc = sess.run(
[optimizer, cost, accuracy],
feed_dict={emb: epoch_x, labels: epoch_y})
epoch_loss += batch_cost
epoch_acc += batch_acc
print(f"Epoch: {epoch}; loss: {batch_cost}; "
f"acc: {batch_acc}", end="\r")
epoch_acc /= epoch_range
epoch_loss /= epoch_range
print(f"\nEpoch: {epoch}; loss: {epoch_loss}; "
f"acc: {epoch_acc}")
val_range = int(len(x_test) / batch_size) + 1
val_loss = 0
acc_val = 0
for i in range(val_range):
batch_start = batch_size * i
batch_end = batch_start + batch_size
val_x = x_test[batch_start: batch_end]
val_y = y_test[batch_start: batch_end]
batch_cost, epoch_acc = sess.run(
[cost, accuracy],
feed_dict={emb: val_x, labels: val_y})
val_loss += batch_cost
acc_val += epoch_acc
val_loss /= val_range
acc_val /= val_range
if epoch == 0:
tf.saved_model.simple_save(sess, 'w2v/tf/my-model',
inputs={"emb": emb},
outputs={"output": output})
else:
if val_loss < train_loss_results[-1]:
tf.saved_model.simple_save(sess, 'w2v/tf/my-model',
inputs={"emb": emb},
outputs={"output": output})
else:
patience -= 1
train_accuracy_results.append(acc_val)
train_loss_results.append(val_loss)
print("\n", epoch_loss, "Train accuracy:",
acc_val, "Val accuracy:", acc_val)
if patience <= 0:
break
print("training complete")
def wordnet_to_df():
model = load_embeddings("en")
hyponyms = [w for w in wordnet.all_synsets()]
hyponyms = [w for w in hyponyms if not w.hyponyms()]
hyponyms = [w for w in hyponyms if w.hypernyms()]
hyponyms = [w for w in hyponyms if ".n." in w.name()]
hyponyms = [h for h in hyponyms if h.name().split(".")[0] in model.vocab]
top_level = 10
df = pd.DataFrame()
df[f"class{top_level}_synsets"] = hyponyms
names = [h.name().split(".")[0] for h in hyponyms]
df[f"class{top_level}_name"] = names
# get_averaged_vectors requires a list of strings
df[f"class{top_level}_vectors"] = [model[n] for n in names]
# get hyponyms for English wordnet
for i in range(top_level - 1, -1, -1):
print("\t", i, end="\r")
hypernyms = [h.hypernyms() if h else None
for h in df[f"class{i+1}_synsets"].values]
hypernyms = [h[0] if h else None for h in hypernyms]
df[f"class{i}_synsets"] = hypernyms
df[f"class{i}_name"] = [h.name() if h else None for h in hypernyms]
df = get_vectors_from_prev_synset(df, i, "en")
print(df["class0_synsets"].drop_duplicates().dropna())
lang = "ru"
model_ru = load_embeddings(lang)
if lang == "ru":
nouns = get_nouns_from_model(model_ru)
# get Non-English (Russian) wordnet words
# first we get vectors from English words
# after that we use hierarchical Russian vectors
use_russian_vectors = False
for i in range(top_level, -1, -1):
print("\n")
if i == top_level:
vectors = df[f"class{i}_vectors"].dropna()
else:
if use_russian_vectors:
vectors = df[f"class{i}_vectors_{lang}"].dropna()
else:
vectors = df[f"class{i}_vectors"].dropna()
df[f"class{i}_{lang}"] = ""
df[f"class{i}_sim"] = 0
index = vectors.index
for j, v in enumerate(vectors.values):
print("\t", i, j, end="\r")
if i != top_level:
prev_word = df.loc[index[j]][f"class{i+1}_{lang}"]
else:
prev_word = ""
most_similar = model_ru.most_similar([v], topn=100)
most_similar = [m for m in most_similar
if m[0] in nouns and m[0] != prev_word]
if not most_similar:
continue
most_similar = most_similar[0]
ru_word = most_similar[0]
similarity = most_similar[1]
if similarity > 0.5:
df.loc[index[j], f"class{i}_{lang}"] = ru_word
df.loc[index[j], f"class{i}_sim"] = similarity
# create Russian vectors for the next level
if use_russian_vectors:
if i == top_level:
# get vectors for russian words
df[f"class{i}_vectors_{lang}"] = [
model_ru[n] if n in model_ru else None
for n in df[f"class{i}_{lang}"].values]
if i != 0:
df = get_vectors_from_prev_synset(df, i - 1)
i = 9
closest = df[df[f"class{i}_sim"] > 0.8]
closest[[f"class{i}_name", f"class{i}_{lang}", f"class{i}_sim"]]
unique_ru = df[f"class{i}_ru"].drop_duplicates().dropna()
unique_ru = unique_ru[unique_ru != '']
for u_r in unique_ru:
df[f"class{i}_ru"] == u_r
i = 9
group = df.groupby(f"class{i}_ru").count()
group = group[group["class10_ru"] < 1000]["class10_ru"]
words = group[group > 2].index
for word in words:
print(df[df[f"class{i}_ru"] == word][[
"class10_ru", "class9_ru",
"class10_name", "class9_name",
"class9_sim"]])
# save wordnet to csv
df[[col for col in df.columns if
any(w in col for w in ("_ru", "_sim", "_col", "name"))
]].to_csv(f"wordnet3_{lang}_without_duplicates.csv")
return df
def get_vectors_from_prev_synset(df, i, lang="ru"):
if lang == "ru":
suffix = "_ru"
else:
suffix = ""
i_vecs = f"class{i}_vectors{suffix}"
if i < 9:
upper_vecs = f"class{i+1}_vectors{suffix}"
else:
upper_vecs = f"class{i+1}_vectors"
i_synsets = f"class{i}_synsets"
df[i_vecs] = None
# df[f"class{i}_vectors{lang}"] = df[f"class{i}_vectors"].astype('object')
unique_synsets = df[i_synsets].drop_duplicates().dropna()
# unique_synsets = unique_synsets[unique_synsets != '']
# unique_synsets = df[f"class{i}_synsets"].dropna()
index = unique_synsets.index
print("\n\n")
for u_i, u_s in enumerate(unique_synsets):
print("\t", u_i, end="\r")
# rows whose class{i} synset equals u_s, i.e. u_s is the hypernym
# of the corresponding entries on the previous (i+1) level
vectors = df[df[i_synsets] == u_s]
vectors = vectors[upper_vecs]
vectors = vectors.dropna()
vectors = np.mean(vectors)
df.at[index[u_i], i_vecs] = vectors
return df
def get_nouns_from_model(model):
from rnnmorph.predictor import RNNMorphPredictor
rnn_morph_predictor = RNNMorphPredictor(language="ru")
nouns = list()
for j, w in enumerate(model.vocab):
print("\t", j, end="\r")
parse = rnn_morph_predictor.predict([w])[0]
if parse.pos == "NOUN" and\
"Case=Nom" in parse.tag and "Number=Sing" in parse.tag:
nouns.append(w)
nouns = set(nouns)
return nouns
def analyse_collocations(eng_emb):
collocations = [w for w in eng_emb.vocab if "_" in w]
collocations = [c for c in collocations
if all(w in eng_emb.vocab for w in c.split("_")) and
len(c.split("_")) > 1]
col_vectors = np.array([eng_emb[c] for c in collocations])
print(col_vectors.shape)
col_vectors = np.array([eng_emb[c] for c in collocations])
w_vectors = [np.mean([eng_emb[w] for w in c.split("_") if w], axis=0)
for c in collocations]
w_vectors = np.array(w_vectors)
# similarities; large is closer
sim = [1 - spatial.distance.cosine(w_vectors[w_i], col_vectors[w_i])
for w_i, w in enumerate(w_vectors)]
print(np.median(sim))
def create_syn_combinations(input_iter):
combinations = list(itertools.combinations(input_iter, 2))
combinations = combinations[:int(len(input_iter) * 1.5)]
combinations = [c + (1,) for c in combinations]
return combinations
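# e.g. create_syn_combinations(["cat", "dog", "fox"]) can yield
# [("cat", "dog", 1), ("cat", "fox", 1), ("dog", "fox", 1)] -- pairs of
# co-hyponyms labelled with class 1 (illustrative call, order may vary).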
def parse_hypernyms(filename="hypernyms_41800", khodak=None, vectorizer=None):
f = open(filename)
hypernyms = dict()
reverse = dict()
for i, line in enumerate(f):
if i % 10000 == 0:
print("\t", i, end="\r")
line = line.strip()
line = line.split("\t")
score = float(line[-1])
if score < 0.9:
continue
hyper = line[0]
hypo = line[1]
# if vectorizer:
# vec = vectorizer.transform([f"{hyper} {hypo}"]).todense()
# vec = vec[vec > 0]
# if vec.shape[1] > 1:
# new_score = score * vec.tolist()[0][1]
# else:
# continue
# if new_score < 0.8:
# continue
# else:
# print(hyper, hypo, score, new_score, vec.tolist()[0])
if hyper == hypo:
continue
if khodak:
if hyper not in khodak and hypo not in khodak:
continue
# vectorizer.fit([f"{hypo} {hyper}"])
# print(hyper, hypo, score)
if hypo not in reverse:
reverse[hypo] = {hyper}
else:
reverse[hypo].add(hyper)
if hyper not in hypernyms:
# hypo = (hypo, score) # !!!!!!
hypernyms[hyper] = {(hypo, score)}
else:
hypernyms[hyper].add((hypo, score))
# nature - bear
# nature - animal
# animal - bear
# remove bear from nature
for k, v in reverse.items():
to_delete = list()
for v_i, v_l in enumerate(v):
if v_l in reverse:
joined = reverse[v_l] & v
if joined:
for hyper_k in joined:
# hypernyms[hyper_k] holds (hyponym, score) tuples, so drop the
# tuples whose hyponym is k (sets have no dict-style pop)
if any(hypo == k for hypo, _ in hypernyms[hyper_k]):
hypernyms[hyper_k] = {t for t in hypernyms[hyper_k] if t[0] != k}
to_delete.append(hyper_k)
reverse[k] = {w for w in reverse[k] if w not in to_delete}
# to_delete = set()
# for k, v in hypernyms.items():
# upper_hierarchy_words = dict()
# for word in v:
# if word in hypernyms:
# upper_hierarchy_words[word] = hypernyms[word]
# to_delete.add(word)
# hypernyms[k] = [w for w in v if w not in upper_hierarchy_word]
# for w in upper_hierarchy_word:
# print(k, word)
# hypernyms[k] =(hypernyms[w])
# hypernyms = {k: v for k, v in hypernyms.items()
# if k not in to_delete}
return hypernyms, reverse
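# Illustrative shapes only (made-up entries): hypernyms maps a hypernym to a
# set of (hyponym, score) tuples, e.g. {"animal": {("bear", 0.93)}}, while
# reverse maps a hyponym to the set of its hypernyms, e.g. {"bear": {"animal"}}.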
def load_model():
with open("other_works/pawn/ru_matches.txt") as f:
khodak = f.readlines()
khodak = [l.strip().split("\t") for l in khodak if ":" in l]
khodak = [l[1] for l in khodak if len(l) == 2]
khodak = set(khodak)
hypernyms, reverse = parse_hypernyms(khodak=khodak, vectorizer=None)
lines = []
for k, v in reverse.items():
lines.append("{} {}".format(k, " ".join(v)))
for k, v in hypernyms.items():
# hypernym entries are (hyponym, score) tuples; join only the words
lines.append("{} {}".format(k, " ".join(w for w, _ in v)))
vectorizer = TfidfVectorizer()
vectorizer.fit(lines)
hypernyms_v, reverse_v = parse_hypernyms(khodak=khodak, vectorizer=vectorizer)
to_match = [(w, k) for k, v in reverse.items() for w in v]
random.shuffle(to_match)
match_df = pd.DataFrame(to_match)
annotate(match_df, "wordnet_nouns", 0, limit=200)
|
py | b40637b9789882c736f0032dc1b892fe9b43bf65 | """
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
import copy
from datetime import date, tzinfo
import itertools
import os
import re
from typing import (
TYPE_CHECKING,
Any,
Dict,
Hashable,
List,
Optional,
Tuple,
Type,
Union,
)
import warnings
import numpy as np
from pandas._config import config, get_option
from pandas._libs import lib, writers as libwriters
from pandas._libs.tslibs import timezones
from pandas.compat._optional import import_optional_dependency
from pandas.errors import PerformanceWarning
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_list_like,
is_string_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCExtensionArray
from pandas.core.dtypes.missing import array_equivalent
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
PeriodIndex,
Series,
TimedeltaIndex,
concat,
isna,
)
from pandas._typing import ArrayLike, FrameOrSeries
from pandas.core.arrays.categorical import Categorical
import pandas.core.common as com
from pandas.core.computation.pytables import PyTablesExpr, maybe_expression
from pandas.core.index import ensure_index
from pandas.io.common import _stringify_path
from pandas.io.formats.printing import adjoin, pprint_thing
if TYPE_CHECKING:
from tables import File, Node, Col # noqa:F401
# versioning attribute
_version = "0.15.2"
# encoding
_default_encoding = "UTF-8"
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, np.bytes_):
s = s.decode("UTF-8")
return s
def _ensure_encoding(encoding):
# set the encoding if we need
if encoding is None:
encoding = _default_encoding
return encoding
def _ensure_str(name):
"""
Ensure that an index / column name is a str (python 3); otherwise they
may be np.string dtype. Non-string dtypes are passed through unchanged.
https://github.com/pandas-dev/pandas/issues/13492
"""
if isinstance(name, str):
name = str(name)
return name
Term = PyTablesExpr
def _ensure_term(where, scope_level: int):
"""
ensure that the where is a Term or a list of Term
this makes sure that we are capturing the scope of variables
that are passed
create the terms here with a frame_level=2 (we are 2 levels down)
"""
# only consider list/tuple here as an ndarray is automatically a coordinate
# list
level = scope_level + 1
if isinstance(where, (list, tuple)):
wlist = []
for w in filter(lambda x: x is not None, where):
if not maybe_expression(w):
wlist.append(w)
else:
wlist.append(Term(w, scope_level=level))
where = wlist
elif maybe_expression(where):
where = Term(where, scope_level=level)
return where if where is None or len(where) else None
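# Illustrative note: _ensure_term("index > 10", scope_level=1) wraps the string
# in a Term; a list or tuple is normalized element-wise (non-expressions are
# kept as-is), and an empty where collapses to None.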
class PossibleDataLossError(Exception):
pass
class ClosedFileError(Exception):
pass
class IncompatibilityWarning(Warning):
pass
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""
class AttributeConflictWarning(Warning):
pass
attribute_conflict_doc = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""
class DuplicateWarning(Warning):
pass
duplicate_doc = """
duplicate entries in table, taking most recently appended
"""
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# formats
_FORMAT_MAP = {"f": "fixed", "fixed": "fixed", "t": "table", "table": "table"}
# storer class map
_STORER_MAP = {
"series": "SeriesFixed",
"frame": "FrameFixed",
}
# table class map
_TABLE_MAP = {
"generic_table": "GenericTable",
"appendable_series": "AppendableSeriesTable",
"appendable_multiseries": "AppendableMultiSeriesTable",
"appendable_frame": "AppendableFrameTable",
"appendable_multiframe": "AppendableMultiFrameTable",
"worm": "WORMTable",
}
# axes map
_AXES_MAP = {DataFrame: [0]}
# register our configuration options
dropna_doc = """
: boolean
drop ALL nan rows when appending to a table
"""
format_doc = """
: format
default format writing format, if None, then
put will default to 'fixed' and append will default to 'table'
"""
with config.config_prefix("io.hdf"):
config.register_option("dropna_table", False, dropna_doc, validator=config.is_bool)
config.register_option(
"default_format",
None,
format_doc,
validator=config.is_one_of_factory(["fixed", "table", None]),
)
# oh the troubles to reduce import time
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
global _table_mod
global _table_file_open_policy_is_strict
if _table_mod is None:
import tables
_table_mod = tables
# set the file open policy
# return the file open policy; this changes as of pytables 3.1
# depending on the HDF5 version
try:
_table_file_open_policy_is_strict = (
tables.file._FILE_OPEN_POLICY == "strict"
)
except AttributeError:
pass
return _table_mod
# interface to/from ###
def to_hdf(
path_or_buf,
key: str,
value: FrameOrSeries,
mode: str = "a",
complevel: Optional[int] = None,
complib: Optional[str] = None,
append: bool = False,
format: Optional[str] = None,
index: bool = True,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
dropna: Optional[bool] = None,
data_columns: Optional[List[str]] = None,
errors: str = "strict",
encoding: str = "UTF-8",
):
""" store this object, close it if we opened it """
if append:
f = lambda store: store.append(
key,
value,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
else:
# NB: dropna is not passed to `put`
f = lambda store: store.put(
key,
value,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
with HDFStore(
path_or_buf, mode=mode, complevel=complevel, complib=complib
) as store:
f(store)
else:
f(path_or_buf)
def read_hdf(
path_or_buf,
key=None,
mode: str = "r",
errors: str = "strict",
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
columns=None,
iterator=False,
chunksize: Optional[int] = None,
**kwargs,
):
"""
Read from the store, close it if we opened it.
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
path_or_buf : str, path object, pandas.HDFStore or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.h5``.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
Alternatively, pandas accepts an open :class:`pandas.HDFStore` object.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
.. versionadded:: 0.21.0 support for __fspath__ protocol.
key : object, optional
The group identifier in the store. Can be omitted if the HDF file
contains a single pandas object.
mode : {'r', 'r+', 'a'}, default 'r'
Mode to use when opening the file. Ignored if path_or_buf is a
:class:`pandas.HDFStore`. Default is 'r'.
where : list, optional
A list of Term (or convertible) objects.
start : int, optional
Row number to start selection.
stop : int, optional
Row number to stop selection.
columns : list, optional
A list of columns names to return.
iterator : bool, optional
Return an iterator object.
chunksize : int, optional
Number of rows to include in an iteration when using an iterator.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
**kwargs
Additional keyword arguments passed to HDFStore.
Returns
-------
item : object
The selected object. Return type depends on the object stored.
See Also
--------
DataFrame.to_hdf : Write a HDF file from a DataFrame.
HDFStore : Low-level access to HDF files.
Examples
--------
>>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z'])
>>> df.to_hdf('./store.h5', 'data')
>>> reread = pd.read_hdf('./store.h5')
"""
if mode not in ["r", "r+", "a"]:
raise ValueError(
f"mode {mode} is not allowed while performing a read. "
f"Allowed modes are r, r+ and a."
)
# grab the scope
if where is not None:
where = _ensure_term(where, scope_level=1)
if isinstance(path_or_buf, HDFStore):
if not path_or_buf.is_open:
raise IOError("The HDFStore must be open for reading.")
store = path_or_buf
auto_close = False
else:
path_or_buf = _stringify_path(path_or_buf)
if not isinstance(path_or_buf, str):
raise NotImplementedError(
"Support for generic buffers has not been implemented."
)
try:
exists = os.path.exists(path_or_buf)
# if filepath is too long
except (TypeError, ValueError):
exists = False
if not exists:
raise FileNotFoundError(f"File {path_or_buf} does not exist")
store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs)
# can't auto open/close if we are using an iterator
# so delegate to the iterator
auto_close = True
try:
if key is None:
groups = store.groups()
if len(groups) == 0:
raise ValueError("No dataset in HDF5 file.")
candidate_only_group = groups[0]
# For the HDF file to have only one dataset, all other groups
# should then be metadata groups for that candidate group. (This
# assumes that the groups() method enumerates parent groups
# before their children.)
for group_to_check in groups[1:]:
if not _is_metadata_of(group_to_check, candidate_only_group):
raise ValueError(
"key must be provided when HDF5 file "
"contains multiple datasets."
)
key = candidate_only_group._v_pathname
return store.select(
key,
where=where,
start=start,
stop=stop,
columns=columns,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
except (ValueError, TypeError, KeyError):
if not isinstance(path_or_buf, HDFStore):
# if there is an error, close the store if we opened it.
try:
store.close()
except AttributeError:
pass
raise
def _is_metadata_of(group: "Node", parent_group: "Node") -> bool:
"""Check if a given group is a metadata group for a given parent_group."""
if group._v_depth <= parent_group._v_depth:
return False
current = group
while current._v_depth > 1:
parent = current._v_parent
if parent == parent_group and current._v_name == "meta":
return True
current = current._v_parent
return False
class HDFStore:
"""
Dict-like IO interface for storing pandas objects in PyTables.
Either Fixed or Table format.
Parameters
----------
path : string
File path to HDF5 file
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 or None disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
Examples
--------
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
"""
_handle: Optional["File"]
_mode: str
_complevel: int
_fletcher32: bool
def __init__(
self,
path,
mode: str = "a",
complevel: Optional[int] = None,
complib=None,
fletcher32: bool = False,
**kwargs,
):
if "format" in kwargs:
raise ValueError("format is not a defined argument for HDFStore")
tables = import_optional_dependency("tables")
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
f"complib only supports {tables.filters.all_complibs} compression."
)
if complib is None and complevel is not None:
complib = tables.filters.default_complib
self._path = _stringify_path(path)
if mode is None:
mode = "a"
self._mode = mode
self._handle = None
self._complevel = complevel if complevel else 0
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
def __fspath__(self):
return self._path
@property
def root(self):
""" return the root node """
self._check_if_open()
return self._handle.root
@property
def filename(self):
return self._path
def __getitem__(self, key: str):
return self.get(key)
def __setitem__(self, key: str, value):
self.put(key, value)
def __delitem__(self, key: str):
return self.remove(key)
def __getattr__(self, name: str):
""" allow attribute access to get stores """
try:
return self.get(name)
except (KeyError, ClosedFileError):
pass
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
def __contains__(self, key: str) -> bool:
""" check for existence of this key
can match the exact pathname or the pathname w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if name == key or name[1:] == key:
return True
return False
def __len__(self) -> int:
return len(self.groups())
def __repr__(self) -> str:
pstr = pprint_thing(self._path)
return f"{type(self)}\nFile path: {pstr}\n"
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def keys(self) -> List[str]:
"""
Return a list of keys corresponding to objects stored in HDFStore.
Returns
-------
list
List of ABSOLUTE path-names (e.g. have the leading '/').
"""
return [n._v_pathname for n in self.groups()]
def __iter__(self):
return iter(self.keys())
def items(self):
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
iteritems = items
def open(self, mode: str = "a", **kwargs):
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ["a", "w"] and mode in ["r", "r+"]:
pass
elif mode in ["w"]:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
f"Re-opening the file [{self._path}] with mode [{self._mode}] "
"will delete the current file!"
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complevel and self._complevel > 0:
self._filters = _tables().Filters(
self._complevel, self._complib, fletcher32=self._fletcher32
)
try:
self._handle = tables.open_file(self._path, self._mode, **kwargs)
except IOError as err: # pragma: no cover
if "can not be written" in str(err):
print(f"Opening {self._path} in read-only mode")
self._handle = tables.open_file(self._path, "r", **kwargs)
else:
raise
except ValueError as err:
# trap PyTables >= 3.1 FILE_OPEN_POLICY exception
# to provide an updated message
if "FILE_OPEN_POLICY" in str(err):
hdf_version = tables.get_hdf5_version()
err = ValueError(
f"PyTables [{tables.__version__}] no longer supports "
"opening multiple files\n"
"even in read-only mode on this HDF5 version "
f"[{hdf_version}]. You can accept this\n"
"and not open the same file multiple times at once,\n"
"upgrade the HDF5 version, or downgrade to PyTables 3.0.0 "
"which allows\n"
"files to be opened multiple times at once\n"
)
raise err
except Exception as err:
# trying to read from a non-existent file causes an error which
# is not part of IOError, make it one
if self._mode == "r" and "Unable to open/create file" in str(err):
raise IOError(str(err))
raise
def close(self):
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self) -> bool:
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync: bool = False):
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
try:
os.fsync(self._handle.fileno())
except OSError:
pass
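# Illustrative sketch (comment only; `store` is an assumed open HDFStore and
# `s` an assumed Series): flush pushes buffered writes to the file, and
# fsync=True additionally asks the OS to sync its own buffers.
#
#   store.put("s", s)
#   store.flush(fsync=True)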
def get(self, key: str):
"""
Retrieve pandas object stored in file.
Parameters
----------
key : str
Returns
-------
object
Same type as object stored in file.
"""
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
return self._read_group(group)
def select(
self,
key: str,
where=None,
start=None,
stop=None,
columns=None,
iterator=False,
chunksize=None,
auto_close: bool = False,
):
"""
Retrieve pandas object stored in file, optionally based on where criteria.
Parameters
----------
key : str
Object being retrieved from file.
where : list, default None
List of Term (or convertible) objects, optional.
start : int, default None
Row number to start selection.
stop : int, default None
Row number to stop selection.
columns : list, default None
A list of columns that if not None, will limit the return columns.
iterator : bool, default False
Returns an iterator.
chunksize : int, default None
Number of rows to include in iteration, return an iterator.
auto_close : bool, default False
Should automatically close the store when finished.
Returns
-------
object
Retrieved object from file.
"""
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop, where=_where, columns=columns)
# create the iterator
it = TableIterator(
self,
s,
func,
where=where,
nrows=s.nrows,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
return it.get_result()
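# Illustrative sketch (comment only): `store` is an assumed open HDFStore and
# "df" an assumed table-format key whose column "A" was stored as a data
# column, so it can appear in a where clause. chunksize turns the result into
# an iterator of DataFrames.
#
#   store.select("df", where="A > 0", columns=["A", "B"])
#   for chunk in store.select("df", chunksize=10_000):
#       ...  # process each chunk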
def select_as_coordinates(
self,
key: str,
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""
return the selection as an Index
Parameters
----------
key : str
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_coordinates with a table")
return tbl.read_coordinates(where=where, start=start, stop=stop)
def select_column(
self,
key: str,
column: str,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""
return a single column from the table. This is generally only useful to
select an indexable
Parameters
----------
key : str
column : str
The column of interest.
start : int or None, default None
stop : int or None, default None
Raises
------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_column with a table")
return tbl.read_column(column=column, start=start, stop=stop)
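# Illustrative sketch (comment only; key "df" and data column "A" are
# assumptions): only indexables and data columns can be read individually.
#
#   store.select_column("df", "index")   # the stored index
#   store.select_column("df", "A")       # ValueError if "A" is not a data column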
def select_as_multiple(
self,
keys,
where=None,
selector=None,
columns=None,
start=None,
stop=None,
iterator=False,
chunksize=None,
auto_close: bool = False,
):
"""
Retrieve pandas objects from multiple tables.
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : bool, default False
Should automatically close the store when finished.
Raises
------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, str):
return self.select(
key=keys,
where=where,
columns=columns,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
if t is None:
raise KeyError(f"Invalid table [{k}]")
if not t.is_table:
raise TypeError(
f"object [{t.pathname}] is not a table, and cannot be used in all "
"select as multiple"
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError("all tables must have exactly the same nrows!")
# The isinstance checks here are redundant with the check above,
# but necessary for mypy; see GH#29757
_tbls = [x for x in tbls if isinstance(x, Table)]
# axis is the concatenation axis
axis = list({t.non_index_axes[0][0] for t in _tbls})[0]
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of
# coordinates here
objs = [
t.read(where=_where, columns=columns, start=_start, stop=_stop)
for t in tbls
]
# concat and return
return concat(objs, axis=axis, verify_integrity=False)._consolidate()
# create the iterator
it = TableIterator(
self,
s,
func,
where=where,
nrows=nrows,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
return it.get_result(coordinates=True)
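# Illustrative sketch (comment only; keys "df1"/"df2" are assumptions and the
# tables must have identical nrows): the where clause is evaluated against the
# selector table, and the matching rows are taken from every table and
# concatenated along the non-index axis.
#
#   store.select_as_multiple(["df1", "df2"], where="A > 0", selector="df1")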
def put(
self,
key: str,
value: FrameOrSeries,
format=None,
index=True,
append=False,
complib=None,
complevel: Optional[int] = None,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
data_columns: Optional[List[str]] = None,
encoding=None,
errors: str = "strict",
):
"""
Store object in HDFStore.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable.
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data.
append : bool, default False
This will force Table format, append the input data to the
existing.
data_columns : list, default None
List of columns to create as data columns, or True to
use all columns. See `here
<http://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
encoding : str, default None
Provide an encoding for strings.
dropna : bool, default False
Do not write an ALL nan row to the store, settable by the option 'io.hdf.dropna_table'.
"""
if format is None:
format = get_option("io.hdf.default_format") or "fixed"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
encoding=encoding,
errors=errors,
)
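# Illustrative sketch (comment only; `store`, `df` and the keys are
# assumptions): "fixed" is fast but neither appendable nor queryable, while
# "table" supports append and where-based selection.
#
#   store.put("df_fixed", df)                                    # fixed format
#   store.put("df_table", df, format="table", data_columns=["A"])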
def remove(self, key: str, where=None, start=None, stop=None):
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Raises
------
raises KeyError if key is not a valid store
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except KeyError:
# the key is not a valid store, re-raising KeyError
raise
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception:
# In tests we get here with ClosedFileError, TypeError, and
# _table_mod.NoSuchNodeError. TODO: Catch only these?
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!"
)
# we are actually trying to remove a node (with children)
node = self.get_node(key)
if node is not None:
node._f_remove(recursive=True)
return None
# remove the node
if com.all_none(where, start, stop):
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
"can only remove with where on objects written as tables"
)
return s.delete(where=where, start=start, stop=stop)
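# Illustrative sketch (comment only; key "df" is an assumption): without a
# where clause the whole node is dropped; with one, matching rows are deleted,
# which only works on table-format nodes.
#
#   store.remove("df")                    # drop the entire node
#   store.remove("df", where="A > 0")     # delete rows from a table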
def append(
self,
key: str,
value: FrameOrSeries,
format=None,
axes=None,
index=True,
append=True,
complib=None,
complevel: Optional[int] = None,
columns=None,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
chunksize=None,
expectedrows=None,
dropna: Optional[bool] = None,
data_columns: Optional[List[str]] = None,
encoding=None,
errors: str = "strict",
):
"""
Append to Table in file. Node must already exist and be Table
format.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'table' is the default
table(t) : table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data.
append : bool, default True
Append the input data to the existing.
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<http://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
min_itemsize : dict of columns that specify minimum string sizes
nan_rep : string to use as string nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for strings
dropna : bool, default False
Do not write an ALL nan row to the store settable
by the option 'io.hdf.dropna_table'.
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
"""
if columns is not None:
raise TypeError(
"columns is not a supported keyword in append, try data_columns"
)
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or "table"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
axes=axes,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
data_columns=data_columns,
encoding=encoding,
errors=errors,
)
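# Illustrative sketch (comment only; `store`, `df1`, `df2` and key "df" are
# assumptions): append forces table format, and later chunks must match the
# schema written first; min_itemsize reserves room for longer strings.
#
#   store.append("df", df1, data_columns=["A"], min_itemsize={"B": 30})
#   store.append("df", df2)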
def append_to_multiple(
self,
d: Dict,
value,
selector,
data_columns=None,
axes=None,
dropna=False,
**kwargs,
):
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designed as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN. Default False.
Notes
-----
axes parameter is currently not accepted
"""
if axes is not None:
raise TypeError(
"axes is currently not accepted as a parameter to"
" append_to_multiple; you can create the "
"tables independently instead"
)
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
# figure out how to split the value
remain_key = None
remain_values: List = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that "
"is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how="all").index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex(v, axis=axis)
self.append(k, val, data_columns=dc, **kwargs)
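# Illustrative sketch (comment only; keys "t1"/"t2" and columns are
# assumptions): split one frame column-wise across two tables, with "t1"
# acting as the selector for later select_as_multiple queries; None collects
# all remaining columns.
#
#   store.append_to_multiple(
#       {"t1": ["A", "B"], "t2": None}, df, selector="t1",
#   )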
def create_table_index(
self,
key: str,
columns=None,
optlevel: Optional[int] = None,
kind: Optional[str] = None,
):
"""
Create a pytables index on the table.
Parameters
----------
key : str
columns : None, bool, or listlike[str]
Indicate which columns to create an index on.
* False : Do not create any indexes.
* True : Create indexes on all columns.
* None : Create indexes on all columns.
* listlike : Create indexes on the given columns.
optlevel : int or None, default None
Optimization level, if None, pytables defaults to 6.
kind : str or None, default None
Kind of index, if None, pytables defaults to "medium"
Raises
------
TypeError: raises if the node is not a table
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not isinstance(s, Table):
raise TypeError("cannot create table index on a Fixed format store")
s.create_index(columns=columns, optlevel=optlevel, kind=kind)
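# Illustrative sketch (comment only; key "df" and column "A" are assumptions):
# build a PyTables index on chosen data columns to speed up where-based
# selection on large tables.
#
#   store.create_table_index("df", columns=["A"], optlevel=9, kind="full")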
def groups(self):
"""
Return a list of all the top-level nodes.
Each node returned is not a pandas storage object.
Returns
-------
list
List of objects.
"""
_tables()
self._check_if_open()
return [
g
for g in self._handle.walk_groups()
if (
not isinstance(g, _table_mod.link.Link)
and (
getattr(g._v_attrs, "pandas_type", None)
or getattr(g, "table", None)
or (isinstance(g, _table_mod.table.Table) and g._v_name != "table")
)
)
]
def walk(self, where="/"):
"""
Walk the pytables group hierarchy for pandas objects.
This generator will yield the group path, subgroups and pandas object
names for each group.
Any non-pandas PyTables objects that are not a group will be ignored.
The `where` group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure.
.. versionadded:: 0.24.0
Parameters
----------
where : str, default "/"
Group where to start walking.
Yields
------
path : str
Full path to a group (without trailing '/').
groups : list
Names (strings) of the groups contained in `path`.
leaves : list
Names (strings) of the pandas objects contained in `path`.
"""
_tables()
self._check_if_open()
for g in self._handle.walk_groups(where):
if getattr(g._v_attrs, "pandas_type", None) is not None:
continue
groups = []
leaves = []
for child in g._v_children.values():
pandas_type = getattr(child._v_attrs, "pandas_type", None)
if pandas_type is None:
if isinstance(child, _table_mod.group.Group):
groups.append(child._v_name)
else:
leaves.append(child._v_name)
yield (g._v_pathname.rstrip("/"), groups, leaves)
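# Illustrative sketch (comment only; `store` is an assumed open HDFStore):
# walk behaves much like os.walk over the group hierarchy.
#
#   for path, groups, leaves in store.walk():
#       for leaf in leaves:
#           obj = store.get(f"{path}/{leaf}")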
def get_node(self, key: str) -> Optional["Node"]:
""" return the node with the key or None if it does not exist """
self._check_if_open()
if not key.startswith("/"):
key = "/" + key
assert self._handle is not None
assert _table_mod is not None # for mypy
try:
node = self._handle.get_node(self.root, key)
except _table_mod.exceptions.NoSuchNodeError:
return None
assert isinstance(node, _table_mod.Node), type(node)
return node
def get_storer(self, key: str) -> Union["GenericFixed", "Table"]:
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
s = self._create_storer(group)
s.infer_axes()
return s
def copy(
self,
file,
mode="w",
propindexes: bool = True,
keys=None,
complib=None,
complevel: Optional[int] = None,
fletcher32: bool = False,
overwrite=True,
):
"""
Copy the existing store to a new file, updating in place.
Parameters
----------
propindexes: bool, default True
Restore indexes in copied file.
keys : list of keys to include in the copy (defaults to all)
overwrite : overwrite (remove and replace) existing nodes in the
new store (default is True)
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
"""
new_store = HDFStore(
file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32
)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if isinstance(s, Table):
index: Union[bool, list] = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k,
data,
index=index,
data_columns=getattr(s, "data_columns", None),
encoding=s.encoding,
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
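# Illustrative sketch (comment only; "backup.h5" is an assumed file name):
# copy every key into a new file, optionally recompressing; the returned
# HDFStore is open and should be closed by the caller.
#
#   new_store = store.copy("backup.h5", complib="blosc", complevel=9)
#   new_store.close()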
def info(self) -> str:
"""
Print detailed information on the store.
.. versionadded:: 0.21.0
Returns
-------
str
"""
path = pprint_thing(self._path)
output = f"{type(self)}\nFile path: {path}\n"
if self.is_open:
lkeys = sorted(self.keys())
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(pprint_thing(s or "invalid_HDFStore node"))
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as detail:
keys.append(k)
dstr = pprint_thing(detail)
values.append(f"[invalid_HDFStore node: {dstr}]")
output += adjoin(12, keys, values)
else:
output += "Empty"
else:
output += "File is CLOSED"
return output
# ------------------------------------------------------------------------
# private methods
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError(f"{self._path} file is not open!")
def _validate_format(self, format: str) -> str:
""" validate / deprecate formats """
# validate
try:
format = _FORMAT_MAP[format.lower()]
except KeyError:
raise TypeError(f"invalid HDFStore format specified [{format}]")
return format
def _create_storer(
self,
group,
format=None,
value=None,
encoding: str = "UTF-8",
errors: str = "strict",
) -> Union["GenericFixed", "Table"]:
""" return a suitable class to operate """
def error(t):
# return instead of raising so mypy can tell where we are raising
return TypeError(
f"cannot properly create the storer for: [{t}] [group->"
f"{group},value->{type(value)},format->{format}"
)
pt = _ensure_decoded(getattr(group._v_attrs, "pandas_type", None))
tt = _ensure_decoded(getattr(group._v_attrs, "table_type", None))
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
assert _table_mod is not None # for mypy
if getattr(group, "table", None) or isinstance(
group, _table_mod.table.Table
):
pt = "frame_table"
tt = "generic_table"
else:
raise TypeError(
"cannot create a storer if the object is not existing "
"nor a value are passed"
)
else:
_TYPE_MAP = {Series: "series", DataFrame: "frame"}
try:
pt = _TYPE_MAP[type(value)]
except KeyError:
raise error("_TYPE_MAP")
# we are actually a table
if format == "table":
pt += "_table"
# a storer node
if "table" not in pt:
try:
return globals()[_STORER_MAP[pt]](
self, group, encoding=encoding, errors=errors
)
except KeyError:
raise error("_STORER_MAP")
# existing node (and must be a table)
if tt is None:
# if we are a writer, determine the tt
if value is not None:
if pt == "series_table":
index = getattr(value, "index", None)
if index is not None:
if index.nlevels == 1:
tt = "appendable_series"
elif index.nlevels > 1:
tt = "appendable_multiseries"
elif pt == "frame_table":
index = getattr(value, "index", None)
if index is not None:
if index.nlevels == 1:
tt = "appendable_frame"
elif index.nlevels > 1:
tt = "appendable_multiframe"
elif pt == "wide_table":
tt = "appendable_panel"
elif pt == "ndim_table":
tt = "appendable_ndim"
else:
# distinguish between a frame/table
tt = "legacy_panel"
try:
fields = group.table._v_attrs.fields
if len(fields) == 1 and fields[0] == "value":
tt = "legacy_frame"
except IndexError:
pass
try:
return globals()[_TABLE_MAP[tt]](
self, group, encoding=encoding, errors=errors
)
except KeyError:
raise error("_TABLE_MAP")
def _write_to_group(
self,
key: str,
value: FrameOrSeries,
format,
axes=None,
index=True,
append=False,
complib=None,
complevel: Optional[int] = None,
fletcher32=None,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
chunksize=None,
expectedrows=None,
dropna=False,
nan_rep=None,
data_columns=None,
encoding=None,
errors: str = "strict",
):
group = self.get_node(key)
# we make this assertion for mypy; the get_node call will already
# have raised if this is incorrect
assert self._handle is not None
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
# we don't want to store a table node at all if our object is 0-len
# as there are no dtypes
if getattr(value, "empty", None) and (format == "table" or append):
return
if group is None:
paths = key.split("/")
# recursively create the groups
path = "/"
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith("/"):
new_path += "/"
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
s = self._create_storer(group, format, value, encoding=encoding, errors=errors)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if not s.is_table or (s.is_table and format == "fixed" and s.is_exists):
raise ValueError("Can only append to Tables")
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError("Compression not supported on Fixed format stores")
# write the object
s.write(
obj=value,
axes=axes,
append=append,
complib=complib,
complevel=complevel,
fletcher32=fletcher32,
min_itemsize=min_itemsize,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
nan_rep=nan_rep,
data_columns=data_columns,
)
if isinstance(s, Table) and index:
s.create_index(columns=index)
def _read_group(self, group: "Node"):
s = self._create_storer(group)
s.infer_axes()
return s.read()
class TableIterator:
""" define the iteration interface on a table
Parameters
----------
store : the reference store
s : the referred storer
func : the function to execute the query
where : the where of the query
nrows : the rows to iterate on
start : the passed start value (default is None)
stop : the passed stop value (default is None)
iterator : bool, default False
Whether to use the default iterator.
chunksize : the passed chunking value (default is 100000)
auto_close : boolean, automatically close the store at the end of
iteration, default is False
"""
chunksize: Optional[int]
store: HDFStore
s: Union["GenericFixed", "Table"]
def __init__(
self,
store: HDFStore,
s: Union["GenericFixed", "Table"],
func,
where,
nrows,
start=None,
stop=None,
iterator: bool = False,
chunksize: Optional[int] = None,
auto_close: bool = False,
):
self.store = store
self.s = s
self.func = func
self.where = where
# set start/stop if they are not set if we are a table
if self.s.is_table:
if nrows is None:
nrows = 0
if start is None:
start = 0
if stop is None:
stop = nrows
stop = min(nrows, stop)
self.nrows = nrows
self.start = start
self.stop = stop
self.coordinates = None
if iterator or chunksize is not None:
if chunksize is None:
chunksize = 100000
self.chunksize = int(chunksize)
else:
self.chunksize = None
self.auto_close = auto_close
def __iter__(self):
# iterate
current = self.start
while current < self.stop:
stop = min(current + self.chunksize, self.stop)
value = self.func(None, None, self.coordinates[current:stop])
current = stop
if value is None or not len(value):
continue
yield value
self.close()
def close(self):
if self.auto_close:
self.store.close()
def get_result(self, coordinates: bool = False):
# return the actual iterator
if self.chunksize is not None:
if not isinstance(self.s, Table):
raise TypeError("can only use an iterator or chunksize on a table")
self.coordinates = self.s.read_coordinates(where=self.where)
return self
# if specified, read via coordinates (necessary for multiple selections)
if coordinates:
if not isinstance(self.s, Table):
raise TypeError("can only read_coordinates on a table")
where = self.s.read_coordinates(
where=self.where, start=self.start, stop=self.stop
)
else:
where = self.where
# directly return the result
results = self.func(self.start, self.stop, where)
self.close()
return results
class IndexCol:
""" an index column description class
Parameters
----------
axis : axis which I reference
values : the ndarray like converted values
kind : a string description of this type
typ : the pytables type
pos : the position in the pytables
"""
is_an_indexable = True
is_data_indexable = True
_info_fields = ["freq", "tz", "index_name"]
name: str
cname: str
def __init__(
self,
name: str,
values=None,
kind=None,
typ=None,
cname: Optional[str] = None,
axis=None,
pos=None,
freq=None,
tz=None,
index_name=None,
ordered=None,
table=None,
meta=None,
metadata=None,
):
if not isinstance(name, str):
raise ValueError("`name` must be a str.")
self.values = values
self.kind = kind
self.typ = typ
self.name = name
self.cname = cname or name
self.axis = axis
self.pos = pos
self.freq = freq
self.tz = tz
self.index_name = index_name
self.ordered = ordered
self.table = table
self.meta = meta
self.metadata = metadata
if pos is not None:
self.set_pos(pos)
# These are ensured as long as the passed arguments match the
# constructor annotations.
assert isinstance(self.name, str)
assert isinstance(self.cname, str)
@property
def itemsize(self) -> int:
# Assumes self.typ has already been initialized
return self.typ.itemsize
@property
def kind_attr(self) -> str:
return f"{self.name}_kind"
def set_pos(self, pos: int):
""" set the position of this column in the Table """
self.pos = pos
if pos is not None and self.typ is not None:
self.typ._v_pos = pos
def __repr__(self) -> str:
temp = tuple(
map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind))
)
return ",".join(
(
f"{key}->{value}"
for key, value in zip(["name", "cname", "axis", "pos", "kind"], temp)
)
)
def __eq__(self, other: Any) -> bool:
""" compare 2 col items """
return all(
getattr(self, a, None) == getattr(other, a, None)
for a in ["name", "cname", "axis", "pos"]
)
def __ne__(self, other) -> bool:
return not self.__eq__(other)
@property
def is_indexed(self) -> bool:
""" return whether I am an indexed column """
if not hasattr(self.table, "cols"):
# e.g. if infer hasn't been called yet, self.table will be None.
return False
# GH#29692 mypy doesn't recognize self.table as having a "cols" attribute
# 'error: "None" has no attribute "cols"'
return getattr(self.table.cols, self.cname).is_indexed # type: ignore
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
""" set the values from this selection: take = take ownership """
assert isinstance(values, np.ndarray), type(values)
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
values = _maybe_convert(values, self.kind, encoding, errors)
kwargs = dict()
kwargs["name"] = _ensure_decoded(self.index_name)
if self.freq is not None:
kwargs["freq"] = _ensure_decoded(self.freq)
# making an Index instance could throw a number of different errors
try:
new_pd_index = Index(values, **kwargs)
except ValueError:
# if the output freq is different than what we recorded,
# it should be None (see also 'doc example part 2')
if "freq" in kwargs:
kwargs["freq"] = None
new_pd_index = Index(values, **kwargs)
new_pd_index = _set_tz(new_pd_index, self.tz)
self.values = new_pd_index
def take_data(self):
""" return the values & release the memory """
self.values, values = None, self.values
return values
@property
def attrs(self):
return self.table._v_attrs
@property
def description(self):
return self.table.description
@property
def col(self):
""" return my current col description """
return getattr(self.description, self.cname, None)
@property
def cvalues(self):
""" return my cython values """
return self.values
def __iter__(self):
return iter(self.values)
def maybe_set_size(self, min_itemsize=None):
""" maybe set a string col itemsize:
min_itemsize can be an integer or a dict with this column's name
with an integer size """
if _ensure_decoded(self.kind) == "string":
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
if min_itemsize is not None and self.typ.itemsize < min_itemsize:
self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos)
def validate(self, handler, append):
self.validate_names()
def validate_names(self):
pass
def validate_and_set(self, handler: "AppendableTable", append: bool):
self.table = handler.table
self.validate_col()
self.validate_attr(append)
self.validate_metadata(handler)
self.write_metadata(handler)
self.set_attr()
def validate_col(self, itemsize=None):
""" validate this column: return the compared against itemsize """
# validate this column for string truncation (or reset to the max size)
if _ensure_decoded(self.kind) == "string":
c = self.col
if c is not None:
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
raise ValueError(
f"Trying to store a string with len [{itemsize}] in "
f"[{self.cname}] column but\nthis column has a limit of "
f"[{c.itemsize}]!\nConsider using min_itemsize to "
"preset the sizes on these columns"
)
return c.itemsize
return None
def validate_attr(self, append: bool):
# check for backwards incompatibility
if append:
existing_kind = getattr(self.attrs, self.kind_attr, None)
if existing_kind is not None and existing_kind != self.kind:
raise TypeError(
f"incompatible kind in col [{existing_kind} - {self.kind}]"
)
def update_info(self, info):
""" set/update the info for this indexable with the key/value
if there is a conflict raise/warn as needed """
for key in self._info_fields:
value = getattr(self, key, None)
idx = info.setdefault(self.name, {})
existing_value = idx.get(key)
if key in idx and value is not None and existing_value != value:
# frequency/name just warn
if key in ["freq", "index_name"]:
ws = attribute_conflict_doc % (key, existing_value, value)
warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
# reset
idx[key] = None
setattr(self, key, None)
else:
raise ValueError(
f"invalid info for [{self.name}] for [{key}], "
f"existing_value [{existing_value}] conflicts with "
f"new value [{value}]"
)
else:
if value is not None or existing_value is not None:
idx[key] = value
def set_info(self, info):
""" set my state from the passed info """
idx = info.get(self.name)
if idx is not None:
self.__dict__.update(idx)
def get_attr(self):
""" set the kind for this column """
self.kind = getattr(self.attrs, self.kind_attr, None)
def set_attr(self):
""" set the kind for this column """
setattr(self.attrs, self.kind_attr, self.kind)
def validate_metadata(self, handler: "AppendableTable"):
""" validate that kind=category does not change the categories """
if self.meta == "category":
new_metadata = self.metadata
cur_metadata = handler.read_metadata(self.cname)
if (
new_metadata is not None
and cur_metadata is not None
and not array_equivalent(new_metadata, cur_metadata)
):
raise ValueError(
"cannot append a categorical with "
"different categories to the existing"
)
def write_metadata(self, handler: "AppendableTable"):
""" set the meta data """
if self.metadata is not None:
handler.write_metadata(self.cname, self.metadata)
class GenericIndexCol(IndexCol):
""" an index which is not represented in the data of the table """
@property
def is_indexed(self) -> bool:
return False
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
"""
Set the values from this selection.
Parameters
----------
values : np.ndarray
nan_rep : str
encoding : str
errors : str
"""
assert isinstance(values, np.ndarray), type(values)
self.values = Int64Index(np.arange(len(values)))
def get_attr(self):
pass
def set_attr(self):
pass
class DataCol(IndexCol):
""" a data holding column, by definition this is not indexable
Parameters
----------
data : the actual data
cname : the column name in the table to hold the data (typically
values)
meta : a string description of the metadata
metadata : the actual metadata
"""
is_an_indexable = False
is_data_indexable = False
_info_fields = ["tz", "ordered"]
def __init__(
self,
name: str,
values=None,
kind=None,
typ=None,
cname=None,
pos=None,
tz=None,
ordered=None,
table=None,
meta=None,
metadata=None,
dtype=None,
data=None,
):
super().__init__(
name=name,
values=values,
kind=kind,
typ=typ,
pos=pos,
cname=cname,
tz=tz,
ordered=ordered,
table=table,
meta=meta,
metadata=metadata,
)
self.dtype = dtype
self.data = data
@property
def dtype_attr(self) -> str:
return f"{self.name}_dtype"
@property
def meta_attr(self) -> str:
return f"{self.name}_meta"
def __repr__(self) -> str:
temp = tuple(
map(
pprint_thing, (self.name, self.cname, self.dtype, self.kind, self.shape)
)
)
return ",".join(
(
f"{key}->{value}"
for key, value in zip(["name", "cname", "dtype", "kind", "shape"], temp)
)
)
def __eq__(self, other: Any) -> bool:
""" compare 2 col items """
return all(
getattr(self, a, None) == getattr(other, a, None)
for a in ["name", "cname", "dtype", "pos"]
)
def set_data(self, data: Union[np.ndarray, ABCExtensionArray]):
assert data is not None
assert self.dtype is None
data, dtype_name = _get_data_and_dtype_name(data)
self.data = data
self.dtype = dtype_name
self.kind = _dtype_to_kind(dtype_name)
def take_data(self):
""" return the data & release the memory """
self.data, data = None, self.data
return data
@classmethod
def _get_atom(cls, values: Union[np.ndarray, ABCExtensionArray]) -> "Col":
"""
Get an appropriately typed and shaped pytables.Col object for values.
"""
dtype = values.dtype
itemsize = dtype.itemsize
shape = values.shape
if values.ndim == 1:
# EA, use block shape pretending it is 2D
shape = (1, values.size)
if is_categorical_dtype(dtype):
codes = values.codes
atom = cls.get_atom_data(shape, kind=codes.dtype.name)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
atom = cls.get_atom_datetime64(shape)
elif is_timedelta64_dtype(dtype):
atom = cls.get_atom_timedelta64(shape)
elif is_complex_dtype(dtype):
atom = _tables().ComplexCol(itemsize=itemsize, shape=shape[0])
elif is_string_dtype(dtype):
atom = cls.get_atom_string(shape, itemsize)
else:
atom = cls.get_atom_data(shape, kind=dtype.name)
return atom
@classmethod
def get_atom_string(cls, shape, itemsize):
return _tables().StringCol(itemsize=itemsize, shape=shape[0])
@classmethod
def get_atom_coltype(cls, kind: str) -> Type["Col"]:
""" return the PyTables column class for this column """
if kind.startswith("uint"):
k4 = kind[4:]
col_name = f"UInt{k4}Col"
elif kind.startswith("period"):
# we store as integer
col_name = "Int64Col"
else:
kcap = kind.capitalize()
col_name = f"{kcap}Col"
return getattr(_tables(), col_name)
@classmethod
def get_atom_data(cls, shape, kind: str) -> "Col":
return cls.get_atom_coltype(kind=kind)(shape=shape[0])
@classmethod
def get_atom_datetime64(cls, shape):
return _tables().Int64Col(shape=shape[0])
@classmethod
def get_atom_timedelta64(cls, shape):
return _tables().Int64Col(shape=shape[0])
@property
def shape(self):
return getattr(self.data, "shape", None)
@property
def cvalues(self):
""" return my cython values """
return self.data
def validate_attr(self, append):
"""validate that we have the same order as the existing & same dtype"""
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if existing_fields is not None and existing_fields != list(self.values):
raise ValueError("appended items do not match existing items in table!")
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if existing_dtype is not None and existing_dtype != self.dtype:
raise ValueError(
"appended items dtype do not match existing "
"items dtype in table!"
)
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
"""set the data from this selection (and convert to the correct dtype
if we can)
"""
assert isinstance(values, np.ndarray), type(values)
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
assert self.typ is not None
if self.dtype is None:
self.set_data(values)
else:
self.data = values
own_data = self.data
# use the meta if needed
meta = _ensure_decoded(self.meta)
assert self.dtype is not None
# convert to the correct dtype
dtype = _ensure_decoded(self.dtype)
# reverse converts
if dtype == "datetime64":
# recreate with tz if indicated
own_data = _set_tz(own_data, self.tz, coerce=True)
elif dtype == "timedelta64":
own_data = np.asarray(own_data, dtype="m8[ns]")
elif dtype == "date":
try:
own_data = np.asarray(
[date.fromordinal(v) for v in own_data], dtype=object
)
except ValueError:
own_data = np.asarray(
[date.fromtimestamp(v) for v in own_data], dtype=object
)
elif meta == "category":
# we have a categorical
categories = self.metadata
codes = own_data.ravel()
# if we have stored a NaN in the categories
# then strip it; in theory we could have BOTH
# -1s in the codes and nulls :<
if categories is None:
# Handle case of NaN-only categorical columns in which case
# the categories are an empty array; when this is stored,
# pytables cannot write a zero-len array, so on readback
# the categories would be None and `read_hdf()` would fail.
categories = Index([], dtype=np.float64)
else:
mask = isna(categories)
if mask.any():
categories = categories[~mask]
codes[codes != -1] -= mask.astype(int).cumsum().values
own_data = Categorical.from_codes(
codes, categories=categories, ordered=self.ordered
)
else:
try:
own_data = own_data.astype(dtype, copy=False)
except TypeError:
own_data = own_data.astype("O", copy=False)
# convert nans / decode
if _ensure_decoded(self.kind) == "string":
own_data = _unconvert_string_array(
own_data, nan_rep=nan_rep, encoding=encoding, errors=errors
)
self.data = own_data
def get_attr(self):
""" get the data for this column """
self.values = getattr(self.attrs, self.kind_attr, None)
self.dtype = getattr(self.attrs, self.dtype_attr, None)
self.meta = getattr(self.attrs, self.meta_attr, None)
assert self.typ is not None
assert self.dtype is not None
self.kind = _dtype_to_kind(self.dtype)
def set_attr(self):
""" set the data for this column """
setattr(self.attrs, self.kind_attr, self.values)
setattr(self.attrs, self.meta_attr, self.meta)
assert self.dtype is not None
setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
""" represent a data column that can be indexed """
is_data_indexable = True
def validate_names(self):
if not Index(self.values).is_object():
# TODO: should the message here be more specifically non-str?
raise ValueError("cannot have non-object label DataIndexableCol")
@classmethod
def get_atom_string(cls, shape, itemsize):
return _tables().StringCol(itemsize=itemsize)
@classmethod
def get_atom_data(cls, shape, kind: str) -> "Col":
return cls.get_atom_coltype(kind=kind)()
@classmethod
def get_atom_datetime64(cls, shape):
return _tables().Int64Col()
@classmethod
def get_atom_timedelta64(cls, shape):
return _tables().Int64Col()
class GenericDataIndexableCol(DataIndexableCol):
""" represent a generic pytables data column """
def get_attr(self):
pass
class Fixed:
""" represent an object in my store
facilitate read/write of various types of objects
this is an abstract base class
Parameters
----------
parent : HDFStore
group : Node
The group node where the table resides.
"""
pandas_kind: str
obj_type: Type[Union[DataFrame, Series]]
ndim: int
parent: HDFStore
group: "Node"
errors: str
is_table = False
def __init__(
self, parent: HDFStore, group: "Node", encoding=None, errors: str = "strict"
):
assert isinstance(parent, HDFStore), type(parent)
assert _table_mod is not None # needed for mypy
assert isinstance(group, _table_mod.Node), type(group)
self.parent = parent
self.group = group
self.encoding = _ensure_encoding(encoding)
self.errors = errors
@property
def is_old_version(self) -> bool:
return self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1
@property
def version(self) -> Tuple[int, int, int]:
""" compute and set our version """
version = _ensure_decoded(getattr(self.group._v_attrs, "pandas_version", None))
try:
version = tuple(int(x) for x in version.split("."))
if len(version) == 2:
version = version + (0,)
except AttributeError:
version = (0, 0, 0)
return version
@property
def pandas_type(self):
return _ensure_decoded(getattr(self.group._v_attrs, "pandas_type", None))
def __repr__(self) -> str:
""" return a pretty representation of myself """
self.infer_axes()
s = self.shape
if s is not None:
if isinstance(s, (list, tuple)):
jshape = ",".join(pprint_thing(x) for x in s)
s = f"[{jshape}]"
return f"{self.pandas_type:12.12} (shape->{s})"
return self.pandas_type
def set_object_info(self):
""" set my pandas type & version """
self.attrs.pandas_type = str(self.pandas_kind)
self.attrs.pandas_version = str(_version)
def copy(self):
new_self = copy.copy(self)
return new_self
@property
def storage_obj_type(self):
return self.obj_type
@property
def shape(self):
return self.nrows
@property
def pathname(self):
return self.group._v_pathname
@property
def _handle(self):
return self.parent._handle
@property
def _filters(self):
return self.parent._filters
@property
def _complevel(self) -> int:
return self.parent._complevel
@property
def _fletcher32(self) -> bool:
return self.parent._fletcher32
@property
def _complib(self):
return self.parent._complib
@property
def attrs(self):
return self.group._v_attrs
def set_attrs(self):
""" set our object attributes """
pass
def get_attrs(self):
""" get our object attributes """
pass
@property
def storable(self):
""" return my storable """
return self.group
@property
def is_exists(self) -> bool:
return False
@property
def nrows(self):
return getattr(self.storable, "nrows", None)
def validate(self, other):
""" validate against an existing storable """
if other is None:
return
return True
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
return True
def infer_axes(self):
""" infer the axes of my storer
return a boolean indicating if we have a valid storer or not """
s = self.storable
if s is None:
return False
self.get_attrs()
return True
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
raise NotImplementedError(
"cannot read on an abstract storer: subclasses should implement"
)
def write(self, **kwargs):
raise NotImplementedError(
"cannot write on an abstract storer: subclasses should implement"
)
def delete(
self, where=None, start: Optional[int] = None, stop: Optional[int] = None
):
"""
support fully deleting the node in its entirety (only) - where
specification must be None
"""
if com.all_none(where, start, stop):
self._handle.remove_node(self.group, recursive=True)
return None
raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
""" a generified fixed version """
_index_type_map = {DatetimeIndex: "datetime", PeriodIndex: "period"}
_reverse_index_map = {v: k for k, v in _index_type_map.items()}
attributes: List[str] = []
# indexer helpers
def _class_to_alias(self, cls) -> str:
return self._index_type_map.get(cls, "")
def _alias_to_class(self, alias):
if isinstance(alias, type): # pragma: no cover
# compat: for a short period of time master stored types
return alias
return self._reverse_index_map.get(alias, Index)
def _get_index_factory(self, klass):
if klass == DatetimeIndex:
def f(values, freq=None, tz=None):
# data are already in UTC, localize and convert if tz present
result = DatetimeIndex._simple_new(values.values, name=None, freq=freq)
if tz is not None:
result = result.tz_localize("UTC").tz_convert(tz)
return result
return f
elif klass == PeriodIndex:
def f(values, freq=None, tz=None):
return PeriodIndex._simple_new(values, name=None, freq=freq)
return f
return klass
def validate_read(self, columns, where):
"""
raise if any keywords are passed which are not-None
"""
if columns is not None:
raise TypeError(
"cannot pass a column specification when reading "
"a Fixed format store. this store must be "
"selected in its entirety"
)
if where is not None:
raise TypeError(
"cannot pass a where specification when reading "
"from a Fixed format store. this store must be "
"selected in its entirety"
)
@property
def is_exists(self) -> bool:
return True
def set_attrs(self):
""" set our object attributes """
self.attrs.encoding = self.encoding
self.attrs.errors = self.errors
def get_attrs(self):
""" retrieve our attributes """
self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict"))
for n in self.attributes:
setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))
def write(self, obj, **kwargs):
self.set_attrs()
def read_array(
self, key: str, start: Optional[int] = None, stop: Optional[int] = None
):
""" read an array for the specified node (off of group """
import tables
node = getattr(self.group, key)
attrs = node._v_attrs
transposed = getattr(attrs, "transposed", False)
if isinstance(node, tables.VLArray):
ret = node[0][start:stop]
else:
dtype = getattr(attrs, "value_type", None)
shape = getattr(attrs, "shape", None)
if shape is not None:
# length 0 axis
ret = np.empty(shape, dtype=dtype)
else:
ret = node[start:stop]
if dtype == "datetime64":
# reconstruct a timezone if indicated
tz = getattr(attrs, "tz", None)
ret = _set_tz(ret, tz, coerce=True)
elif dtype == "timedelta64":
ret = np.asarray(ret, dtype="m8[ns]")
if transposed:
return ret.T
else:
return ret
def read_index(
self, key: str, start: Optional[int] = None, stop: Optional[int] = None
) -> Index:
variety = _ensure_decoded(getattr(self.attrs, f"{key}_variety"))
if variety == "multi":
return self.read_multi_index(key, start=start, stop=stop)
elif variety == "regular":
node = getattr(self.group, key)
index = self.read_index_node(node, start=start, stop=stop)
return index
else: # pragma: no cover
raise TypeError(f"unrecognized index variety: {variety}")
def write_index(self, key: str, index: Index):
if isinstance(index, MultiIndex):
setattr(self.attrs, f"{key}_variety", "multi")
self.write_multi_index(key, index)
else:
setattr(self.attrs, f"{key}_variety", "regular")
converted = _convert_index("index", index, self.encoding, self.errors)
self.write_array(key, converted.values)
node = getattr(self.group, key)
node._v_attrs.kind = converted.kind
node._v_attrs.name = index.name
if isinstance(index, (DatetimeIndex, PeriodIndex)):
node._v_attrs.index_class = self._class_to_alias(type(index))
if isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
node._v_attrs.freq = index.freq
if isinstance(index, DatetimeIndex) and index.tz is not None:
node._v_attrs.tz = _get_tz(index.tz)
def write_multi_index(self, key: str, index: MultiIndex):
setattr(self.attrs, f"{key}_nlevels", index.nlevels)
for i, (lev, level_codes, name) in enumerate(
zip(index.levels, index.codes, index.names)
):
# write the level
if is_extension_array_dtype(lev):
raise NotImplementedError(
"Saving a MultiIndex with an extension dtype is not supported."
)
level_key = f"{key}_level{i}"
conv_level = _convert_index(level_key, lev, self.encoding, self.errors)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
node._v_attrs.name = name
# write the name
setattr(node._v_attrs, f"{key}_name{name}", name)
# write the labels
label_key = f"{key}_label{i}"
self.write_array(label_key, level_codes)
def read_multi_index(
self, key: str, start: Optional[int] = None, stop: Optional[int] = None
) -> MultiIndex:
nlevels = getattr(self.attrs, f"{key}_nlevels")
levels = []
codes = []
names: List[Optional[Hashable]] = []
for i in range(nlevels):
level_key = f"{key}_level{i}"
node = getattr(self.group, level_key)
lev = self.read_index_node(node, start=start, stop=stop)
levels.append(lev)
names.append(lev.name)
label_key = f"{key}_label{i}"
level_codes = self.read_array(label_key, start=start, stop=stop)
codes.append(level_codes)
return MultiIndex(
levels=levels, codes=codes, names=names, verify_integrity=True
)
def read_index_node(
self, node: "Node", start: Optional[int] = None, stop: Optional[int] = None
) -> Index:
data = node[start:stop]
# If the index was an empty array write_array_empty() will
# have written a sentinel. Here we replace it with the original.
if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0:
data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type,)
kind = _ensure_decoded(node._v_attrs.kind)
name = None
if "name" in node._v_attrs:
name = _ensure_str(node._v_attrs.name)
name = _ensure_decoded(name)
index_class = self._alias_to_class(
_ensure_decoded(getattr(node._v_attrs, "index_class", ""))
)
factory = self._get_index_factory(index_class)
kwargs = {}
if "freq" in node._v_attrs:
kwargs["freq"] = node._v_attrs["freq"]
if "tz" in node._v_attrs:
if isinstance(node._v_attrs["tz"], bytes):
# created by python2
kwargs["tz"] = node._v_attrs["tz"].decode("utf-8")
else:
# created by python3
kwargs["tz"] = node._v_attrs["tz"]
if kind == "date":
index = factory(
_unconvert_index(
data, kind, encoding=self.encoding, errors=self.errors
),
dtype=object,
**kwargs,
)
else:
index = factory(
_unconvert_index(
data, kind, encoding=self.encoding, errors=self.errors
),
**kwargs,
)
index.name = name
return index
def write_array_empty(self, key: str, value: ArrayLike):
""" write a 0-len array """
# ugly hack for length 0 axes
arr = np.empty((1,) * value.ndim)
self._handle.create_array(self.group, key, arr)
node = getattr(self.group, key)
node._v_attrs.value_type = str(value.dtype)
node._v_attrs.shape = value.shape
def write_array(self, key: str, value: ArrayLike, items: Optional[Index] = None):
# TODO: we only have one test that gets here, the only EA
# that gets passed is DatetimeArray, and we never have
# both self._filters and EA
assert isinstance(value, (np.ndarray, ABCExtensionArray)), type(value)
if key in self.group:
self._handle.remove_node(self.group, key)
# Transform needed to interface with pytables row/col notation
empty_array = value.size == 0
transposed = False
if is_categorical_dtype(value):
raise NotImplementedError(
"Cannot store a category dtype in "
"a HDF5 dataset that uses format="
'"fixed". Use format="table".'
)
if not empty_array:
if hasattr(value, "T"):
# ExtensionArrays (1d) may not have transpose.
value = value.T
transposed = True
atom = None
if self._filters is not None:
try:
# get the atom for this datatype
atom = _tables().Atom.from_dtype(value.dtype)
except ValueError:
pass
if atom is not None:
# We only get here if self._filters is non-None and
# the Atom.from_dtype call succeeded
# create an empty chunked array and fill it from value
if not empty_array:
ca = self._handle.create_carray(
self.group, key, atom, value.shape, filters=self._filters
)
ca[:] = value
else:
self.write_array_empty(key, value)
elif value.dtype.type == np.object_:
# infer the type, warn if we have a non-string type here (for
# performance)
inferred_type = lib.infer_dtype(value.ravel(), skipna=False)
if empty_array:
pass
elif inferred_type == "string":
pass
else:
ws = performance_doc % (inferred_type, key, items)
warnings.warn(ws, PerformanceWarning, stacklevel=7)
vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom())
vlarr.append(value)
elif empty_array:
self.write_array_empty(key, value)
elif is_datetime64_dtype(value.dtype):
self._handle.create_array(self.group, key, value.view("i8"))
getattr(self.group, key)._v_attrs.value_type = "datetime64"
elif is_datetime64tz_dtype(value.dtype):
# store as UTC
# with a zone
self._handle.create_array(self.group, key, value.asi8)
node = getattr(self.group, key)
node._v_attrs.tz = _get_tz(value.tz)
node._v_attrs.value_type = "datetime64"
elif is_timedelta64_dtype(value.dtype):
self._handle.create_array(self.group, key, value.view("i8"))
getattr(self.group, key)._v_attrs.value_type = "timedelta64"
else:
self._handle.create_array(self.group, key, value)
getattr(self.group, key)._v_attrs.transposed = transposed
class SeriesFixed(GenericFixed):
pandas_kind = "series"
attributes = ["name"]
name: Optional[Hashable]
@property
def shape(self):
try:
return (len(self.group.values),)
except (TypeError, AttributeError):
return None
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
self.validate_read(columns, where)
index = self.read_index("index", start=start, stop=stop)
values = self.read_array("values", start=start, stop=stop)
return Series(values, index=index, name=self.name)
def write(self, obj, **kwargs):
super().write(obj, **kwargs)
self.write_index("index", obj.index)
self.write_array("values", obj.values)
self.attrs.name = obj.name
class BlockManagerFixed(GenericFixed):
attributes = ["ndim", "nblocks"]
nblocks: int
@property
def shape(self):
try:
ndim = self.ndim
# items
items = 0
for i in range(self.nblocks):
node = getattr(self.group, f"block{i}_items")
shape = getattr(node, "shape", None)
if shape is not None:
items += shape[0]
# data shape
node = self.group.block0_values
shape = getattr(node, "shape", None)
if shape is not None:
shape = list(shape[0 : (ndim - 1)])
else:
shape = []
shape.append(items)
return shape
except AttributeError:
return None
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
# start, stop applied to rows, so 0th axis only
self.validate_read(columns, where)
select_axis = self.obj_type()._get_block_manager_axis(0)
axes = []
for i in range(self.ndim):
_start, _stop = (start, stop) if i == select_axis else (None, None)
ax = self.read_index(f"axis{i}", start=_start, stop=_stop)
axes.append(ax)
items = axes[0]
dfs = []
for i in range(self.nblocks):
blk_items = self.read_index(f"block{i}_items")
values = self.read_array(f"block{i}_values", start=_start, stop=_stop)
columns = items[items.get_indexer(blk_items)]
df = DataFrame(values.T, columns=columns, index=axes[1])
dfs.append(df)
if len(dfs) > 0:
out = concat(dfs, axis=1)
out = out.reindex(columns=items, copy=False)
return out
return DataFrame(columns=axes[0], index=axes[1])
def write(self, obj, **kwargs):
super().write(obj, **kwargs)
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
self.attrs.ndim = data.ndim
for i, ax in enumerate(data.axes):
if i == 0:
if not ax.is_unique:
raise ValueError("Columns index has to be unique for fixed format")
self.write_index(f"axis{i}", ax)
# Supporting mixed-type DataFrame objects...nontrivial
self.attrs.nblocks = len(data.blocks)
for i, blk in enumerate(data.blocks):
# I have no idea why, but writing values before items fixed #2299
blk_items = data.items.take(blk.mgr_locs)
self.write_array(f"block{i}_values", blk.values, items=blk_items)
self.write_index(f"block{i}_items", blk_items)
class FrameFixed(BlockManagerFixed):
pandas_kind = "frame"
obj_type = DataFrame
class Table(Fixed):
""" represent a table:
facilitate read/write of various types of tables
Attrs in Table Node
-------------------
These are attributes that are stored in the main table node; they are
necessary to recreate these tables when read back in.
index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""
pandas_kind = "wide_table"
table_type: str
levels = 1
is_table = True
index_axes: List[IndexCol]
non_index_axes: List[Tuple[int, Any]]
values_axes: List[DataCol]
data_columns: List
metadata: List
info: Dict
def __init__(
self, parent: HDFStore, group: "Node", encoding=None, errors: str = "strict"
):
super().__init__(parent, group, encoding=encoding, errors=errors)
self.index_axes = []
self.non_index_axes = []
self.values_axes = []
self.data_columns = []
self.metadata = []
self.info = dict()
self.nan_rep = None
@property
def table_type_short(self) -> str:
return self.table_type.split("_")[0]
def __repr__(self) -> str:
""" return a pretty representation of myself """
self.infer_axes()
jdc = ",".join(self.data_columns) if len(self.data_columns) else ""
dc = f",dc->[{jdc}]"
ver = ""
if self.is_old_version:
jver = ".".join(str(x) for x in self.version)
ver = f"[{jver}]"
jindex_axes = ",".join(a.name for a in self.index_axes)
return (
f"{self.pandas_type:12.12}{ver} "
f"(typ->{self.table_type_short},nrows->{self.nrows},"
f"ncols->{self.ncols},indexers->[{jindex_axes}]{dc})"
)
def __getitem__(self, c: str):
""" return the axis for c """
for a in self.axes:
if c == a.name:
return a
return None
def validate(self, other):
""" validate against an existing table """
if other is None:
return
if other.table_type != self.table_type:
raise TypeError(
"incompatible table_type with existing "
f"[{other.table_type} - {self.table_type}]"
)
for c in ["index_axes", "non_index_axes", "values_axes"]:
sv = getattr(self, c, None)
ov = getattr(other, c, None)
if sv != ov:
# show the error for the specific axes
for i, sax in enumerate(sv):
oax = ov[i]
if sax != oax:
raise ValueError(
f"invalid combinate of [{c}] on appending data "
f"[{sax}] vs current table [{oax}]"
)
# should never get here
raise Exception(
f"invalid combinate of [{c}] on appending data [{sv}] vs "
f"current table [{ov}]"
)
@property
def is_multi_index(self) -> bool:
"""the levels attribute is 1 or a list in the case of a multi-index"""
return isinstance(self.levels, list)
def validate_multiindex(self, obj):
"""validate that we can store the multi-index; reset and return the
new object
"""
levels = [
l if l is not None else f"level_{i}" for i, l in enumerate(obj.index.names)
]
try:
return obj.reset_index(), levels
except ValueError:
raise ValueError(
"duplicate names/columns in the multi-index when storing as a table"
)
@property
def nrows_expected(self) -> int:
""" based on our axes, compute the expected nrows """
return np.prod([i.cvalues.shape[0] for i in self.index_axes])
@property
def is_exists(self) -> bool:
""" has this table been created """
return "table" in self.group
@property
def storable(self):
return getattr(self.group, "table", None)
@property
def table(self):
""" return the table group (this is my storable) """
return self.storable
@property
def dtype(self):
return self.table.dtype
@property
def description(self):
return self.table.description
@property
def axes(self):
return itertools.chain(self.index_axes, self.values_axes)
@property
def ncols(self) -> int:
""" the number of total columns in the values axes """
return sum(len(a.values) for a in self.values_axes)
@property
def is_transposed(self) -> bool:
return False
@property
def data_orientation(self):
"""return a tuple of my permutated axes, non_indexable at the front"""
return tuple(
itertools.chain(
[int(a[0]) for a in self.non_index_axes],
[int(a.axis) for a in self.index_axes],
)
)
def queryables(self) -> Dict[str, Any]:
""" return a dict of the kinds allowable columns for this object """
# compute the values_axes queryables
d1 = [(a.cname, a) for a in self.index_axes]
d2 = [
(self.storage_obj_type._AXIS_NAMES[axis], None)
for axis, values in self.non_index_axes
]
d3 = [
(v.cname, v) for v in self.values_axes if v.name in set(self.data_columns)
]
return dict(d1 + d2 + d3) # type: ignore
# error: List comprehension has incompatible type
# List[Tuple[Any, None]]; expected List[Tuple[str, IndexCol]]
def index_cols(self):
""" return a list of my index cols """
# Note: each `i.cname` below is assured to be a str.
return [(i.axis, i.cname) for i in self.index_axes]
def values_cols(self) -> List[str]:
""" return a list of my values cols """
return [i.cname for i in self.values_axes]
def _get_metadata_path(self, key: str) -> str:
""" return the metadata pathname for this key """
group = self.group._v_pathname
return f"{group}/meta/{key}/meta"
def write_metadata(self, key: str, values):
"""
write out a meta data array to the key as a fixed-format Series
Parameters
----------
key : str
values : ndarray
"""
values = Series(values)
self.parent.put(
self._get_metadata_path(key),
values,
format="table",
encoding=self.encoding,
errors=self.errors,
nan_rep=self.nan_rep,
)
def read_metadata(self, key: str):
""" return the meta data array for this key """
if getattr(getattr(self.group, "meta", None), key, None) is not None:
return self.parent.select(self._get_metadata_path(key))
return None
def set_attrs(self):
""" set our table type & indexables """
self.attrs.table_type = str(self.table_type)
self.attrs.index_cols = self.index_cols()
self.attrs.values_cols = self.values_cols()
self.attrs.non_index_axes = self.non_index_axes
self.attrs.data_columns = self.data_columns
self.attrs.nan_rep = self.nan_rep
self.attrs.encoding = self.encoding
self.attrs.errors = self.errors
self.attrs.levels = self.levels
self.attrs.metadata = self.metadata
self.attrs.info = self.info
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = getattr(self.attrs, "non_index_axes", None) or []
self.data_columns = getattr(self.attrs, "data_columns", None) or []
self.info = getattr(self.attrs, "info", None) or dict()
self.nan_rep = getattr(self.attrs, "nan_rep", None)
self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict"))
self.levels = getattr(self.attrs, "levels", None) or []
self.index_axes = [a for a in self.indexables if a.is_an_indexable]
self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
self.metadata = getattr(self.attrs, "metadata", None) or []
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
if where is not None:
if self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1:
ws = incompatibility_doc % ".".join([str(x) for x in self.version])
warnings.warn(ws, IncompatibilityWarning)
def validate_min_itemsize(self, min_itemsize):
"""validate the min_itemsize doesn't contain items that are not in the
axes this needs data_columns to be defined
"""
if min_itemsize is None:
return
if not isinstance(min_itemsize, dict):
return
q = self.queryables()
for k, v in min_itemsize.items():
# ok, apply generally
if k == "values":
continue
if k not in q:
raise ValueError(
f"min_itemsize has the key [{k}] which is not an axis or "
"data_column"
)
@cache_readonly
def indexables(self):
""" create/cache the indexables if they don't exist """
_indexables = []
desc = self.description
# Note: each of the `name` kwargs below are str, ensured
# by the definition in index_cols.
# index columns
for i, (axis, name) in enumerate(self.attrs.index_cols):
atom = getattr(desc, name)
md = self.read_metadata(name)
meta = "category" if md is not None else None
index_col = IndexCol(
name=name,
axis=axis,
pos=i,
typ=atom,
table=self.table,
meta=meta,
metadata=md,
)
index_col.get_attr()
_indexables.append(index_col)
# values columns
dc = set(self.data_columns)
base_pos = len(_indexables)
def f(i, c):
assert isinstance(c, str)
klass = DataCol
if c in dc:
klass = DataIndexableCol
atom = getattr(desc, c)
adj_name = _maybe_adjust_name(c, self.version)
md = self.read_metadata(c)
meta = "category" if md is not None else None
obj = klass(
name=adj_name,
cname=c,
pos=base_pos + i,
typ=atom,
table=self.table,
meta=meta,
metadata=md,
)
obj.get_attr()
return obj
# Note: the definition of `values_cols` ensures that each
# `c` below is a str.
_indexables.extend([f(i, c) for i, c in enumerate(self.attrs.values_cols)])
return _indexables
def create_index(self, columns=None, optlevel=None, kind: Optional[str] = None):
"""
Create a pytables index on the specified columns
note: cannot index Time64Col() or ComplexCol currently;
PyTables must be >= 3.0
Parameters
----------
columns : None, bool, or listlike[str]
Indicate which columns to create an index on.
* False : Do not create any indexes.
* True : Create indexes on all columns.
* None : Create indexes on all columns.
* listlike : Create indexes on the given columns.
optlevel : int or None, default None
Optimization level, if None, pytables defaults to 6.
kind : str or None, default None
Kind of index, if None, pytables defaults to "medium"
Raises
------
raises if the node is not a table
"""
if not self.infer_axes():
return
if columns is False:
return
# index all indexables and data_columns
if columns is None or columns is True:
columns = [a.cname for a in self.axes if a.is_data_indexable]
if not isinstance(columns, (tuple, list)):
columns = [columns]
kw = dict()
if optlevel is not None:
kw["optlevel"] = optlevel
if kind is not None:
kw["kind"] = kind
table = self.table
for c in columns:
v = getattr(table.cols, c, None)
if v is not None:
# remove the index if the kind/optlevel have changed
if v.is_indexed:
index = v.index
cur_optlevel = index.optlevel
cur_kind = index.kind
if kind is not None and cur_kind != kind:
v.remove_index()
else:
kw["kind"] = cur_kind
if optlevel is not None and cur_optlevel != optlevel:
v.remove_index()
else:
kw["optlevel"] = cur_optlevel
# create the index
if not v.is_indexed:
if v.type.startswith("complex"):
raise TypeError(
"Columns containing complex values can be stored "
"but cannot"
" be indexed when using table format. Either use "
"fixed format, set index=False, or do not include "
"the columns containing complex values to "
"data_columns when initializing the table."
)
v.create_index(**kw)
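# --- Illustrative note (not part of the original source) --------------------
# A hedged sketch of how create_index is reached through the public API;
# "store" and the "df" key are assumed to exist elsewhere:
# >>> store.put("df", df, format="table", data_columns=["A"])
# >>> store.create_table_index("df", columns=["A"], optlevel=9, kind="full")
# HDFStore.create_table_index forwards to this method, so columns=True or
# None indexes every data-indexable column and columns=False skips index
# creation entirely.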
def read_axes(
self, where, start: Optional[int] = None, stop: Optional[int] = None
) -> bool:
"""
Create the axes sniffed from the table.
Parameters
----------
where : ???
start : int or None, default None
stop : int or None, default None
Returns
-------
bool
Indicates success.
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
selection = Selection(self, where=where, start=start, stop=stop)
values = selection.select()
# convert the data
for a in self.axes:
a.set_info(self.info)
a.convert(
values,
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors,
)
return True
def get_object(self, obj, transposed: bool):
""" return the data for this obj """
return obj
def validate_data_columns(self, data_columns, min_itemsize, non_index_axes):
"""take the input data_columns and min_itemize and create a data
columns spec
"""
if not len(non_index_axes):
return []
axis, axis_labels = non_index_axes[0]
info = self.info.get(axis, dict())
if info.get("type") == "MultiIndex" and data_columns:
raise ValueError(
f"cannot use a multi-index on axis [{axis}] with "
f"data_columns {data_columns}"
)
# evaluate the passed data_columns, True == use all columns
# take only valid axis labels
if data_columns is True:
data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns.extend(
[
k
for k in min_itemsize.keys()
if k != "values" and k not in existing_data_columns
]
)
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels]
def create_axes(
self,
axes,
obj,
validate: bool = True,
nan_rep=None,
data_columns=None,
min_itemsize=None,
):
""" create and return the axes
legacy tables create an indexable column, indexable index,
non-indexable fields
Parameters
----------
axes: a list of the axes in order to create (names or numbers of
the axes)
obj : the object to create axes on
validate: validate the obj against an existing object already
written
min_itemsize: a dict of the min size for a column in bytes
nan_rep : a value to use for string column nan_rep
encoding : the encoding for string values
data_columns : a list of columns that we want to create separate to
allow indexing (or True will force all columns)
"""
# set the default axes if needed
if axes is None:
try:
axes = _AXES_MAP[type(obj)]
except KeyError:
group = self.group._v_name
raise TypeError(
f"cannot properly create the storer for: [group->{group},"
f"value->{type(obj)}]"
)
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
existing_table = self.copy()
existing_table.infer_axes()
axes = [a.axis for a in existing_table.index_axes]
data_columns = existing_table.data_columns
nan_rep = existing_table.nan_rep
self.encoding = existing_table.encoding
self.errors = existing_table.errors
self.info = copy.copy(existing_table.info)
else:
existing_table = None
assert self.ndim == 2 # with next check, we must have len(axes) == 1
# currently only support ndim-1 axes
if len(axes) != self.ndim - 1:
raise ValueError(
"currently only support ndim-1 indexers in an AppendableTable"
)
# create according to the new data
new_non_index_axes: List = []
# nan_representation
if nan_rep is None:
nan_rep = "nan"
# We construct the non-index-axis first, since that alters self.info
idx = [x for x in [0, 1] if x not in axes][0]
a = obj.axes[idx]
# we might be able to change the axes on the appending data if necessary
append_axis = list(a)
if existing_table is not None:
indexer = len(new_non_index_axes) # i.e. 0
exist_axis = existing_table.non_index_axes[indexer][1]
if not array_equivalent(np.array(append_axis), np.array(exist_axis)):
# ahah! -> reindex
if array_equivalent(
np.array(sorted(append_axis)), np.array(sorted(exist_axis))
):
append_axis = exist_axis
# the non_index_axes info
info = self.info.setdefault(idx, {})
info["names"] = list(a.names)
info["type"] = type(a).__name__
new_non_index_axes.append((idx, append_axis))
# Now we can construct our new index axis
idx = axes[0]
a = obj.axes[idx]
name = obj._AXIS_NAMES[idx]
new_index = _convert_index(name, a, self.encoding, self.errors)
new_index.axis = idx
# Because we are always 2D, there is only one new_index, so
# we know it will have pos=0
new_index.set_pos(0)
new_index.update_info(self.info)
new_index.maybe_set_size(min_itemsize) # check for column conflicts
new_index_axes = [new_index]
j = len(new_index_axes) # i.e. 1
assert j == 1
# reindex by our non_index_axes & compute data_columns
assert len(new_non_index_axes) == 1
for a in new_non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
def get_blk_items(mgr, blocks):
return [mgr.items.take(blk.mgr_locs) for blk in blocks]
transposed = new_index.axis == 1
# figure out data_columns and get out blocks
block_obj = self.get_object(obj, transposed)._consolidate()
blocks = block_obj._data.blocks
blk_items = get_blk_items(block_obj._data, blocks)
data_columns = self.validate_data_columns(
data_columns, min_itemsize, new_non_index_axes
)
if len(data_columns):
axis, axis_labels = new_non_index_axes[0]
new_labels = Index(axis_labels).difference(Index(data_columns))
mgr = block_obj.reindex(new_labels, axis=axis)._data
blocks = list(mgr.blocks)
blk_items = get_blk_items(mgr, blocks)
for c in data_columns:
mgr = block_obj.reindex([c], axis=axis)._data
blocks.extend(mgr.blocks)
blk_items.extend(get_blk_items(mgr, mgr.blocks))
# reorder the blocks in the same order as the existing_table if we can
if existing_table is not None:
by_items = {
tuple(b_items.tolist()): (b, b_items)
for b, b_items in zip(blocks, blk_items)
}
new_blocks = []
new_blk_items = []
for ea in existing_table.values_axes:
items = tuple(ea.values)
try:
b, b_items = by_items.pop(items)
new_blocks.append(b)
new_blk_items.append(b_items)
except (IndexError, KeyError):
jitems = ",".join(pprint_thing(item) for item in items)
raise ValueError(
f"cannot match existing table structure for [{jitems}] "
"on appending data"
)
blocks = new_blocks
blk_items = new_blk_items
# add my values
vaxes = []
for i, (b, b_items) in enumerate(zip(blocks, blk_items)):
# shape of the data column are the indexable axes
klass = DataCol
name = None
# we have a data_column
if data_columns and len(b_items) == 1 and b_items[0] in data_columns:
klass = DataIndexableCol
name = b_items[0]
if not (name is None or isinstance(name, str)):
# TODO: should the message here be more specifically non-str?
raise ValueError("cannot have non-object label DataIndexableCol")
# make sure that we match up the existing columns
# if we have an existing table
if existing_table is not None and validate:
try:
existing_col = existing_table.values_axes[i]
except (IndexError, KeyError):
raise ValueError(
f"Incompatible appended table [{blocks}]"
f"with existing table [{existing_table.values_axes}]"
)
else:
existing_col = None
new_name = name or f"values_block_{i}"
data_converted = _maybe_convert_for_string_atom(
new_name,
b,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
encoding=self.encoding,
errors=self.errors,
)
adj_name = _maybe_adjust_name(new_name, self.version)
typ = klass._get_atom(data_converted)
kind = _dtype_to_kind(data_converted.dtype.name)
tz = _get_tz(data_converted.tz) if hasattr(data_converted, "tz") else None
meta = metadata = ordered = None
if is_categorical_dtype(data_converted):
ordered = data_converted.ordered
meta = "category"
metadata = np.array(data_converted.categories, copy=False).ravel()
data, dtype_name = _get_data_and_dtype_name(data_converted)
col = klass(
name=adj_name,
cname=new_name,
values=list(b_items),
typ=typ,
pos=j,
kind=kind,
tz=tz,
ordered=ordered,
meta=meta,
metadata=metadata,
dtype=dtype_name,
data=data,
)
col.update_info(self.info)
vaxes.append(col)
j += 1
self.nan_rep = nan_rep
self.data_columns = [col.name for col in vaxes if col.is_data_indexable]
self.values_axes = vaxes
self.index_axes = new_index_axes
self.non_index_axes = new_non_index_axes
# validate our min_itemsize
self.validate_min_itemsize(min_itemsize)
# validate our metadata
self.metadata = [c.name for c in self.values_axes if c.metadata is not None]
# validate the axes if we have an existing table
if validate:
self.validate(existing_table)
def process_axes(self, obj, selection: "Selection", columns=None):
""" process axes filters """
# make a copy to avoid side effects
if columns is not None:
columns = list(columns)
# make sure to include levels if we have them
if columns is not None and self.is_multi_index:
assert isinstance(self.levels, list) # assured by is_multi_index
for n in self.levels:
if n not in columns:
columns.insert(0, n)
# reorder by any non_index_axes & limit to the select columns
for axis, labels in self.non_index_axes:
obj = _reindex_axis(obj, axis, labels, columns)
# apply the selection filters (but keep in the same order)
if selection.filter is not None:
for field, op, filt in selection.filter.format():
def process_filter(field, filt):
for axis_name in obj._AXIS_NAMES.values():
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
assert axis_number is not None
# see if the field is the name of an axis
if field == axis_name:
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.loc(axis=axis_number)[takers]
# this might be the name of a field IN an axis
elif field in axis_values:
# we need to filter on this dimension
values = ensure_index(getattr(obj, field).values)
filt = ensure_index(filt)
# hack until we support reversed dim flags
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
return obj.loc(axis=axis_number)[takers]
raise ValueError(f"cannot find the field [{field}] for filtering!")
obj = process_filter(field, filt)
return obj
def create_description(
self,
complib=None,
complevel: Optional[int] = None,
fletcher32: bool = False,
expectedrows: Optional[int] = None,
) -> Dict[str, Any]:
""" create the description of the table from the axes & values """
# use the provided expectedrows if it is passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = dict(name="table", expectedrows=expectedrows)
# description from the axes & values
d["description"] = {a.cname: a.typ for a in self.axes}
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel,
complib=complib,
fletcher32=fletcher32 or self._fletcher32,
)
d["filters"] = filters
elif self._filters is not None:
d["filters"] = self._filters
return d
def read_coordinates(
self, where=None, start: Optional[int] = None, stop: Optional[int] = None,
):
"""select coordinates (row numbers) from a table; return the
coordinates object
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
selection = Selection(self, where=where, start=start, stop=stop)
coords = selection.select_coords()
if selection.filter is not None:
for field, op, filt in selection.filter.format():
data = self.read_column(
field, start=coords.min(), stop=coords.max() + 1
)
coords = coords[op(data.iloc[coords - coords.min()], filt).values]
return Index(coords)
def read_column(
self,
column: str,
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""return a single column from the table, generally only indexables
are interesting
"""
# validate the version
self.validate_version()
# infer the data kind
if not self.infer_axes():
return False
if where is not None:
raise TypeError("read_column does not currently accept a where clause")
# find the axes
for a in self.axes:
if column == a.name:
if not a.is_data_indexable:
raise ValueError(
f"column [{column}] can not be extracted individually; "
"it is not data indexable"
)
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
a.convert(
c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors,
)
return Series(_set_tz(a.take_data(), a.tz), name=column)
raise KeyError(f"column [{column}] not found in the table")
class WORMTable(Table):
""" a write-once read-many table: this format DOES NOT ALLOW appending to a
table. Writing is a one-time operation; the data are stored in a format
that allows for searching the data on disk
"""
table_type = "worm"
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
""" read the indices and the indexing array, calculate offset rows and
return """
raise NotImplementedError("WORMTable needs to implement read")
def write(self, **kwargs):
""" write in a format that we can search later on (but cannot append
to): write out the indices and the values using _write_array
(e.g. a CArray) create an indexing table so that we can search
"""
raise NotImplementedError("WORMTable needs to implement write")
class AppendableTable(Table):
""" support the new appendable table formats """
table_type = "appendable"
def write(
self,
obj,
axes=None,
append=False,
complib=None,
complevel=None,
fletcher32=None,
min_itemsize=None,
chunksize=None,
expectedrows=None,
dropna=False,
nan_rep=None,
data_columns=None,
):
if not append and self.is_exists:
self._handle.remove_node(self.group, "table")
# create the axes
self.create_axes(
axes=axes,
obj=obj,
validate=append,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
)
for a in self.axes:
a.validate(self, append)
if not self.is_exists:
# create the table
options = self.create_description(
complib=complib,
complevel=complevel,
fletcher32=fletcher32,
expectedrows=expectedrows,
)
# set the table attributes
self.set_attrs()
# create the table
self._handle.create_table(self.group, **options)
# update my info
self.attrs.info = self.info
# validate the axes and set the kinds
for a in self.axes:
a.validate_and_set(self, append)
# add the rows
self.write_data(chunksize, dropna=dropna)
def write_data(self, chunksize: Optional[int], dropna: bool = False):
""" we form the data into a 2-d including indexes,values,mask
write chunk-by-chunk """
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isna(a.data).all(axis=0)
if isinstance(mask, np.ndarray):
masks.append(mask.astype("u1", copy=False))
# consolidate masks
if len(masks):
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
assert nindexes == 1, nindexes # ensures we don't need to broadcast
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(values[i].reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in indexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues],
)
def write_data_chunk(self, rows, indexes, mask, values):
"""
Parameters
----------
rows : an empty memory space where we are putting the chunk
indexes : an array of the indexes
mask : an array of the masks
values : an array of the values
"""
# 0 len
for v in values:
if not np.prod(v.shape):
return
nrows = indexes[0].shape[0]
if nrows != len(rows):
rows = np.empty(nrows, dtype=self.dtype)
names = self.dtype.names
nindexes = len(indexes)
# indexes
for i, idx in enumerate(indexes):
rows[names[i]] = idx
# values
for i, v in enumerate(values):
rows[names[i + nindexes]] = v
# mask
if mask is not None:
m = ~mask.ravel().astype(bool, copy=False)
if not m.all():
rows = rows[m]
if len(rows):
self.table.append(rows)
self.table.flush()
def delete(
self, where=None, start: Optional[int] = None, stop: Optional[int] = None,
):
# delete all rows (and return the nrows)
if where is None or not len(where):
if start is None and stop is None:
nrows = self.nrows
self._handle.remove_node(self.group, recursive=True)
else:
# pytables<3.0 would remove a single row with stop=None
if stop is None:
stop = self.nrows
nrows = self.table.remove_rows(start=start, stop=stop)
self.table.flush()
return nrows
# infer the data kind
if not self.infer_axes():
return None
# create the selection
table = self.table
selection = Selection(self, where, start=start, stop=stop)
values = selection.select_coords()
# delete the rows in reverse order
sorted_series = Series(values).sort_values()
ln = len(sorted_series)
if ln:
# construct groups of consecutive rows
diff = sorted_series.diff()
groups = list(diff[diff > 1].index)
# 1 group
if not len(groups):
groups = [0]
# final element
if groups[-1] != ln:
groups.append(ln)
# initial element
if groups[0] != 0:
groups.insert(0, 0)
# we must remove in reverse order!
pg = groups.pop()
for g in reversed(groups):
rows = sorted_series.take(range(g, pg))
table.remove_rows(
start=rows[rows.index[0]], stop=rows[rows.index[-1]] + 1
)
pg = g
self.table.flush()
# return the number of rows removed
return ln
class AppendableFrameTable(AppendableTable):
""" support the new appendable table formats """
pandas_kind = "frame_table"
table_type = "appendable_frame"
ndim = 2
obj_type: Type[Union[DataFrame, Series]] = DataFrame
@property
def is_transposed(self) -> bool:
return self.index_axes[0].axis == 1
def get_object(self, obj, transposed: bool):
""" these are written transposed """
if transposed:
obj = obj.T
return obj
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
if not self.read_axes(where=where, start=start, stop=stop):
return None
info = (
self.info.get(self.non_index_axes[0][0], dict())
if len(self.non_index_axes)
else dict()
)
index = self.index_axes[0].values
frames = []
for a in self.values_axes:
# we could have a multi-index constructor here
# ensure_index doesn't recognize our list-of-tuples here
if info.get("type") == "MultiIndex":
cols = MultiIndex.from_tuples(a.values)
else:
cols = Index(a.values)
names = info.get("names")
if names is not None:
cols.set_names(names, inplace=True)
if self.is_transposed:
values = a.cvalues
index_ = cols
cols_ = Index(index, name=getattr(index, "name", None))
else:
values = a.cvalues.T
index_ = Index(index, name=getattr(index, "name", None))
cols_ = cols
# if we have a DataIndexableCol, its shape will only be 1 dim
if values.ndim == 1 and isinstance(values, np.ndarray):
values = values.reshape((1, values.shape[0]))
if isinstance(values, np.ndarray):
df = DataFrame(values.T, columns=cols_, index=index_)
elif isinstance(values, Index):
df = DataFrame(values, columns=cols_, index=index_)
else:
# Categorical
df = DataFrame([values], columns=cols_, index=index_)
assert (df.dtypes == values.dtype).all(), (df.dtypes, values.dtype)
frames.append(df)
if len(frames) == 1:
df = frames[0]
else:
df = concat(frames, axis=1)
selection = Selection(self, where=where, start=start, stop=stop)
# apply the selection filters & axis orderings
df = self.process_axes(df, selection=selection, columns=columns)
return df
class AppendableSeriesTable(AppendableFrameTable):
""" support the new appendable table formats """
pandas_kind = "series_table"
table_type = "appendable_series"
ndim = 2
obj_type = Series
storage_obj_type = DataFrame
@property
def is_transposed(self) -> bool:
return False
def get_object(self, obj, transposed: bool):
return obj
def write(self, obj, data_columns=None, **kwargs):
""" we are going to write this as a frame table """
if not isinstance(obj, DataFrame):
name = obj.name or "values"
obj = obj.to_frame(name)
return super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs)
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
is_multi_index = self.is_multi_index
if columns is not None and is_multi_index:
assert isinstance(self.levels, list) # needed for mypy
for n in self.levels:
if n not in columns:
columns.insert(0, n)
s = super().read(where=where, columns=columns, start=start, stop=stop)
if is_multi_index:
s.set_index(self.levels, inplace=True)
s = s.iloc[:, 0]
# remove the default name
if s.name == "values":
s.name = None
return s
class AppendableMultiSeriesTable(AppendableSeriesTable):
""" support the new appendable table formats """
pandas_kind = "series_table"
table_type = "appendable_multiseries"
def write(self, obj, **kwargs):
""" we are going to write this as a frame table """
name = obj.name or "values"
obj, self.levels = self.validate_multiindex(obj)
cols = list(self.levels)
cols.append(name)
obj.columns = cols
return super().write(obj=obj, **kwargs)
class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
pandas_kind = "frame_table"
table_type = "generic_table"
ndim = 2
obj_type = DataFrame
@property
def pandas_type(self) -> str:
return self.pandas_kind
@property
def storable(self):
return getattr(self.group, "table", None) or self.group
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = []
self.nan_rep = None
self.levels = []
self.index_axes = [a for a in self.indexables if a.is_an_indexable]
self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
self.data_columns = [a.name for a in self.values_axes]
@cache_readonly
def indexables(self):
""" create the indexables from the table description """
d = self.description
# TODO: can we get a typ for this? AFAICT it is the only place
# where we aren't passing one
# the index columns is just a simple index
md = self.read_metadata("index")
meta = "category" if md is not None else None
index_col = GenericIndexCol(
name="index", axis=0, table=self.table, meta=meta, metadata=md
)
index_col.get_attr()
_indexables = [index_col]
for i, n in enumerate(d._v_names):
assert isinstance(n, str)
atom = getattr(d, n)
md = self.read_metadata(n)
meta = "category" if md is not None else None
dc = GenericDataIndexableCol(
name=n,
pos=i,
values=[n],
typ=atom,
table=self.table,
meta=meta,
metadata=md,
)
dc.get_attr()
_indexables.append(dc)
return _indexables
def write(self, **kwargs):
raise NotImplementedError("cannot write on an generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
""" a frame with a multi-index """
table_type = "appendable_multiframe"
obj_type = DataFrame
ndim = 2
_re_levels = re.compile(r"^level_\d+$")
@property
def table_type_short(self) -> str:
return "appendable_multi"
def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
data_columns = []
elif data_columns is True:
data_columns = obj.columns.tolist()
obj, self.levels = self.validate_multiindex(obj)
for n in self.levels:
if n not in data_columns:
data_columns.insert(0, n)
return super().write(obj=obj, data_columns=data_columns, **kwargs)
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
df = super().read(where=where, columns=columns, start=start, stop=stop)
df = df.set_index(self.levels)
# remove names for 'level_%d'
df.index = df.index.set_names(
[None if self._re_levels.search(l) else l for l in df.index.names]
)
return df
def _reindex_axis(obj, axis: int, labels: Index, other=None):
ax = obj._get_axis(axis)
labels = ensure_index(labels)
# try not to reindex even if other is provided
# if it equals our current index
if other is not None:
other = ensure_index(other)
if (other is None or labels.equals(other)) and labels.equals(ax):
return obj
labels = ensure_index(labels.unique())
if other is not None:
labels = ensure_index(other.unique()).intersection(labels, sort=False)
if not labels.equals(ax):
slicer: List[Union[slice, Index]] = [slice(None, None)] * obj.ndim
slicer[axis] = labels
obj = obj.loc[tuple(slicer)]
return obj
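# --- Illustrative note (not part of the original source) --------------------
# _reindex_axis only reindexes when it has to; a sketch with assumed inputs:
# >>> df = DataFrame({"A": [1, 2], "B": [3, 4]})
# >>> _reindex_axis(df, 1, Index(["B", "A"]))   # reorders the columns
# >>> _reindex_axis(df, 1, df.columns) is df    # True: labels already match
# When `other` is given, the result is restricted to labels present in both.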
# tz to/from coercion
def _get_tz(tz: tzinfo) -> Union[str, tzinfo]:
""" for a tz-aware type, return an encoded zone """
zone = timezones.get_timezone(tz)
return zone
def _set_tz(
values: Union[np.ndarray, Index],
tz: Optional[Union[str, tzinfo]],
coerce: bool = False,
) -> Union[np.ndarray, DatetimeIndex]:
"""
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
Parameters
----------
values : ndarray or Index
tz : str or tzinfo
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
"""
if isinstance(values, DatetimeIndex):
# If values is tzaware, the tz gets dropped in the values.ravel()
# call below (which returns an ndarray). So we are only non-lossy
# if `tz` matches `values.tz`.
assert values.tz is None or values.tz == tz
if tz is not None:
name = getattr(values, "name", None)
values = values.ravel()
tz = timezones.get_timezone(_ensure_decoded(tz))
values = DatetimeIndex(values, name=name)
values = values.tz_localize("UTC").tz_convert(tz)
elif coerce:
values = np.asarray(values, dtype="M8[ns]")
return values
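# --- Illustrative note (not part of the original source) --------------------
# _set_tz restores the timezone that _get_tz stripped on write; a sketch with
# assumed inputs (the i8 value is 2020-01-01T00:00:00Z in nanoseconds):
# >>> vals = np.array([1577836800000000000]).view("M8[ns]")
# >>> _set_tz(vals, "US/Eastern")         # tz-aware DatetimeIndex
# >>> _set_tz(vals, None, coerce=True)    # plain M8[ns] ndarray
# Values are interpreted as UTC and then converted to the requested zone.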
def _convert_index(name: str, index: Index, encoding=None, errors="strict"):
assert isinstance(name, str)
index_name = index.name
converted, dtype_name = _get_data_and_dtype_name(index)
kind = _dtype_to_kind(dtype_name)
atom = DataIndexableCol._get_atom(converted)
if isinstance(index, Int64Index):
# Includes Int64Index, RangeIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex,
# in which case "kind" is "integer", "integer", "datetime64",
# "timedelta64", and "integer", respectively.
return IndexCol(
name,
values=converted,
kind=kind,
typ=atom,
freq=getattr(index, "freq", None),
tz=getattr(index, "tz", None),
index_name=index_name,
)
if isinstance(index, MultiIndex):
raise TypeError("MultiIndex not supported here!")
inferred_type = lib.infer_dtype(index, skipna=False)
# we wont get inferred_type of "datetime64" or "timedelta64" as these
# would go through the DatetimeIndex/TimedeltaIndex paths above
values = np.asarray(index)
if inferred_type == "date":
converted = np.asarray([v.toordinal() for v in values], dtype=np.int32)
return IndexCol(
name, converted, "date", _tables().Time32Col(), index_name=index_name,
)
elif inferred_type == "string":
converted = _convert_string_array(values, encoding, errors)
itemsize = converted.dtype.itemsize
return IndexCol(
name,
converted,
"string",
_tables().StringCol(itemsize),
index_name=index_name,
)
elif inferred_type in ["integer", "floating"]:
return IndexCol(
name, values=converted, kind=kind, typ=atom, index_name=index_name,
)
else:
assert isinstance(converted, np.ndarray) and converted.dtype == object
assert kind == "object", kind
atom = _tables().ObjectAtom()
return IndexCol(name, converted, kind, atom, index_name=index_name,)
def _unconvert_index(data, kind: str, encoding=None, errors="strict"):
index: Union[Index, np.ndarray]
if kind == "datetime64":
index = DatetimeIndex(data)
elif kind == "timedelta64":
index = TimedeltaIndex(data)
elif kind == "date":
try:
index = np.asarray([date.fromordinal(v) for v in data], dtype=object)
except (ValueError):
index = np.asarray([date.fromtimestamp(v) for v in data], dtype=object)
elif kind in ("integer", "float"):
index = np.asarray(data)
elif kind in ("string"):
index = _unconvert_string_array(
data, nan_rep=None, encoding=encoding, errors=errors
)
elif kind == "object":
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError(f"unrecognized index type {kind}")
return index
def _maybe_convert_for_string_atom(
name: str, block, existing_col, min_itemsize, nan_rep, encoding, errors
):
if not block.is_object:
return block.values
dtype_name = block.dtype.name
inferred_type = lib.infer_dtype(block.values, skipna=False)
if inferred_type == "date":
raise TypeError("[date] is not implemented as a table column")
elif inferred_type == "datetime":
# after GH#8260
# this only would be hit for a multi-timezone dtype which is an error
raise TypeError(
"too many timezones in this block, create separate data columns"
)
elif not (inferred_type == "string" or dtype_name == "object"):
return block.values
block = block.fillna(nan_rep, downcast=False)
if isinstance(block, list):
# Note: because block is always object dtype, fillna goes
# through a path such that the result is always a 1-element list
block = block[0]
data = block.values
# see if we have a valid string type
inferred_type = lib.infer_dtype(data.ravel(), skipna=False)
if inferred_type != "string":
# we cannot serialize this data, so report an exception on a column
# by column basis
for i in range(block.shape[0]):
col = block.iget(i)
inferred_type = lib.infer_dtype(col.ravel(), skipna=False)
if inferred_type != "string":
iloc = block.mgr_locs.indexer[i]
raise TypeError(
f"Cannot serialize the column [{iloc}] because\n"
f"its data contents are [{inferred_type}] object dtype"
)
# itemsize is the maximum length of a string (along any dimension)
data_converted = _convert_string_array(data, encoding, errors).reshape(data.shape)
assert data_converted.shape == block.shape, (data_converted.shape, block.shape)
itemsize = data_converted.itemsize
# specified min_itemsize?
if isinstance(min_itemsize, dict):
min_itemsize = int(min_itemsize.get(name) or min_itemsize.get("values") or 0)
itemsize = max(min_itemsize or 0, itemsize)
# check for column in the values conflicts
if existing_col is not None:
eci = existing_col.validate_col(itemsize)
if eci > itemsize:
itemsize = eci
data_converted = data_converted.astype(f"|S{itemsize}", copy=False)
return data_converted
def _convert_string_array(data, encoding, errors, itemsize=None):
"""
we take a string-like that is object dtype and coerce to a fixed size
string type
Parameters
----------
data : a numpy array of object dtype
encoding : None or string-encoding
errors : handler for encoding errors
itemsize : integer, optional, defaults to the max length of the strings
Returns
-------
data in a fixed-length string dtype, encoded to bytes if needed
"""
# encode if needed
if encoding is not None and len(data):
data = (
Series(data.ravel()).str.encode(encoding, errors).values.reshape(data.shape)
)
# create the sized dtype
if itemsize is None:
ensured = ensure_object(data.ravel())
itemsize = max(1, libwriters.max_len_string_array(ensured))
data = np.asarray(data, dtype=f"S{itemsize}")
return data
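# --- Illustrative note (not part of the original source) --------------------
# A hedged sketch of _convert_string_array: object-dtype strings become a
# fixed-width byte dtype sized to the longest encoded element.
# >>> arr = np.array(["a", "longer"], dtype=object)
# >>> _convert_string_array(arr, encoding="utf-8", errors="strict").dtype
# dtype('S6')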
def _unconvert_string_array(data, nan_rep=None, encoding=None, errors="strict"):
"""
inverse of _convert_string_array
Parameters
----------
data : fixed length string dtyped array
nan_rep : the storage repr of NaN, optional
encoding : the encoding of the data, optional
errors : handler for encoding errors, default 'strict'
Returns
-------
an object array of the decoded data
"""
shape = data.shape
data = np.asarray(data.ravel(), dtype=object)
# guard against a None encoding (because of a legacy
# where the passed encoding is actually None)
encoding = _ensure_encoding(encoding)
if encoding is not None and len(data):
itemsize = libwriters.max_len_string_array(ensure_object(data))
dtype = f"U{itemsize}"
if isinstance(data[0], bytes):
data = Series(data).str.decode(encoding, errors=errors).values
else:
data = data.astype(dtype, copy=False).astype(object, copy=False)
if nan_rep is None:
nan_rep = "nan"
data = libwriters.string_array_replace_from_nan_rep(data, nan_rep)
return data.reshape(shape)
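# --- Illustrative note (not part of the original source) --------------------
# _unconvert_string_array reverses the conversion above, so the pair should
# round-trip; a sketch with assumed inputs:
# >>> raw = _convert_string_array(np.array(["x", "nan"], dtype=object), "utf-8", "strict")
# >>> _unconvert_string_array(raw, nan_rep="nan", encoding="utf-8")
# array(['x', nan], dtype=object)
# Entries equal to nan_rep are replaced with np.nan on the way back out.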
def _maybe_convert(values: np.ndarray, val_kind, encoding: str, errors: str):
val_kind = _ensure_decoded(val_kind)
if _need_convert(val_kind):
conv = _get_converter(val_kind, encoding, errors)
values = conv(values)
return values
def _get_converter(kind: str, encoding: str, errors: str):
if kind == "datetime64":
return lambda x: np.asarray(x, dtype="M8[ns]")
elif kind == "string":
return lambda x: _unconvert_string_array(x, encoding=encoding, errors=errors)
else: # pragma: no cover
raise ValueError(f"invalid kind {kind}")
def _need_convert(kind) -> bool:
if kind in ("datetime64", "string"):
return True
return False
def _maybe_adjust_name(name: str, version) -> str:
"""
Prior to 0.10.1, values blocks were named like ``values_0`` rather than
``values_block_0``; adjust the given name to the legacy form if necessary.
Parameters
----------
name : str
version : Tuple[int, int, int]
Returns
-------
str
"""
try:
if version[0] == 0 and version[1] <= 10 and version[2] == 0:
m = re.search(r"values_block_(\d+)", name)
if m:
grp = m.groups()[0]
name = f"values_{grp}"
except IndexError:
pass
return name
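# --- Illustrative note (not part of the original source) --------------------
# _maybe_adjust_name only rewrites block names for legacy 0.10.0 files:
# >>> _maybe_adjust_name("values_block_1", (0, 10, 0))
# 'values_1'
# >>> _maybe_adjust_name("values_block_1", (0, 25, 3))
# 'values_block_1'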
def _dtype_to_kind(dtype_str: str) -> str:
"""
Find the "kind" string describing the given dtype name.
"""
dtype_str = _ensure_decoded(dtype_str)
if dtype_str.startswith("string") or dtype_str.startswith("bytes"):
kind = "string"
elif dtype_str.startswith("float"):
kind = "float"
elif dtype_str.startswith("complex"):
kind = "complex"
elif dtype_str.startswith("int") or dtype_str.startswith("uint"):
kind = "integer"
elif dtype_str.startswith("datetime64"):
kind = "datetime64"
elif dtype_str.startswith("timedelta"):
kind = "timedelta64"
elif dtype_str.startswith("bool"):
kind = "bool"
elif dtype_str.startswith("category"):
kind = "category"
elif dtype_str.startswith("period"):
# We store the `freq` attr so we can restore from integers
kind = "integer"
elif dtype_str == "object":
kind = "object"
else:
raise ValueError(f"cannot interpret dtype of [{dtype_str}]")
return kind
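# --- Illustrative note (not part of the original source) --------------------
# A few sample mappings produced by _dtype_to_kind:
# >>> _dtype_to_kind("int64")
# 'integer'
# >>> _dtype_to_kind("datetime64[ns, UTC]")
# 'datetime64'
# >>> _dtype_to_kind("object")
# 'object'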
def _get_data_and_dtype_name(data: Union[np.ndarray, ABCExtensionArray]):
"""
Convert the passed data into a storable form and a dtype string.
"""
if is_categorical_dtype(data.dtype):
data = data.codes
# For datetime64tz we need to drop the TZ in tests TODO: why?
dtype_name = data.dtype.name.split("[")[0]
if data.dtype.kind in ["m", "M"]:
data = np.asarray(data.view("i8"))
# TODO: we used to reshape for the dt64tz case, but no longer
# doing that doesn't seem to break anything. why?
elif isinstance(data, PeriodIndex):
data = data.asi8
data = np.asarray(data)
return data, dtype_name
class Selection:
"""
Carries out a selection operation on a tables.Table object.
Parameters
----------
table : a Table object
where : list of Terms (or convertible to)
start, stop: indices to start and/or stop selection
"""
def __init__(
self,
table: Table,
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
self.table = table
self.where = where
self.start = start
self.stop = stop
self.condition = None
self.filter = None
self.terms = None
self.coordinates = None
if is_list_like(where):
# see if we have a passed coordinate like
try:
inferred = lib.infer_dtype(where, skipna=False)
if inferred == "integer" or inferred == "boolean":
where = np.asarray(where)
if where.dtype == np.bool_:
start, stop = self.start, self.stop
if start is None:
start = 0
if stop is None:
stop = self.table.nrows
self.coordinates = np.arange(start, stop)[where]
elif issubclass(where.dtype.type, np.integer):
if (self.start is not None and (where < self.start).any()) or (
self.stop is not None and (where >= self.stop).any()
):
raise ValueError(
"where must have index locations >= start and < stop"
)
self.coordinates = where
except ValueError:
pass
if self.coordinates is None:
self.terms = self.generate(where)
# create the numexpr & the filter
if self.terms is not None:
self.condition, self.filter = self.terms.evaluate()
def generate(self, where):
""" where can be a : dict,list,tuple,string """
if where is None:
return None
q = self.table.queryables()
try:
return PyTablesExpr(where, queryables=q, encoding=self.table.encoding)
except NameError:
# raise a nice message, suggesting that the user should use
# data_columns
qkeys = ",".join(q.keys())
raise ValueError(
f"The passed where expression: {where}\n"
" contains an invalid variable reference\n"
" all of the variable references must be a "
"reference to\n"
" an axis (e.g. 'index' or 'columns'), or a "
"data_column\n"
f" The currently defined references are: {qkeys}\n"
)
def select(self):
"""
generate the selection
"""
if self.condition is not None:
return self.table.table.read_where(
self.condition.format(), start=self.start, stop=self.stop
)
elif self.coordinates is not None:
return self.table.table.read_coordinates(self.coordinates)
return self.table.table.read(start=self.start, stop=self.stop)
def select_coords(self):
"""
generate the selection
"""
start, stop = self.start, self.stop
nrows = self.table.nrows
if start is None:
start = 0
elif start < 0:
start += nrows
if self.stop is None:
stop = nrows
elif stop < 0:
stop += nrows
if self.condition is not None:
return self.table.table.get_where_list(
self.condition.format(), start=start, stop=stop, sort=True
)
elif self.coordinates is not None:
return self.coordinates
return np.arange(start, stop)
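# --- Illustrative usage sketch (not part of the original source) -------------
# How the table machinery above is typically exercised through the public
# HDFStore API; the path and the DataFrames df/more_rows are assumptions made
# for illustration only:
# >>> with HDFStore("example.h5") as store:
# ...     store.put("df", df, format="table", data_columns=["A"])
# ...     store.append("df", more_rows)
# ...     subset = store.select("df", where="A > 0", columns=["A", "B"])
# put/append with format="table" route through AppendableFrameTable.write,
# while select parses the where clause into a Selection/PyTablesExpr before
# dispatching to the storer's read method.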
|
py | b40637e8eb19049e9ced64111a07fd6878ab7d1f |
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from . import simple_daq
|
py | b40638c94bdc8ee48fc1ccfac624749de17a1517 | from enum import Enum
class SubscriberEventOutcome(Enum):
subscription_created = "subscription created"
resubscribed = "resubscribed"
already_subscribed = "already subscribed"
unsubscribed = "unsubscribed"
already_unsubscribed = "already unsubscribed"
subscription_does_not_exist = "subscription does not exist" |
py | b40639b7bf3c7306d9160d3836af5ac546071811 | # pylint: disable=unused-import, missing-docstring
# -*- coding: utf-8 -*-
#
# ramstk.views.gtk3.preferences.panel.py is part of the RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""RAMSTK GTK3 Preferences Panels."""
# Standard Library Imports
from typing import Any, Dict, List
# Third Party Imports
import toml
from pubsub import pub
# RAMSTK Package Imports
from ramstk.configuration import RAMSTKUserConfiguration
from ramstk.utilities import string_to_boolean
from ramstk.views.gtk3 import Gdk, Gtk, _
from ramstk.views.gtk3.widgets import (
RAMSTKComboBox,
RAMSTKEntry,
RAMSTKFileChooserButton,
RAMSTKFixedPanel,
RAMSTKLabel,
RAMSTKTreePanel,
RAMSTKTreeView,
)
class GeneralPreferencesPanel(RAMSTKFixedPanel):
"""The panel to display options to be edited."""
# Define private dictionary class attributes.
# Define private list class attributes.
# Define private scalar class attributes.
_select_msg = "succeed_get_preferences_attributes"
_tag = "preferences"
_title = _("General Preferences")
# Define public dictionary class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(self) -> None:
"""Initialize an instance of the Preferences panel."""
super().__init__()
# Initialize widgets.
self.btnConfDir: RAMSTKFileChooserButton = RAMSTKFileChooserButton(
_("RAMSTK Configuration File Directory")
)
self.btnDataDir: RAMSTKFileChooserButton = RAMSTKFileChooserButton(
_("RAMSTK Data Directory")
)
self.btnIconDir: RAMSTKFileChooserButton = RAMSTKFileChooserButton(
_("RAMSTK Icon Directory")
)
self.btnLogDir: RAMSTKFileChooserButton = RAMSTKFileChooserButton(
_("RAMSTK Log Directory")
)
self.cmbModuleBookTabPosition: RAMSTKComboBox = RAMSTKComboBox(simple=True)
self.cmbWorkBookTabPosition: RAMSTKComboBox = RAMSTKComboBox(simple=True)
self.cmbListBookTabPosition: RAMSTKComboBox = RAMSTKComboBox(simple=True)
self.cmbReportSize: RAMSTKComboBox = RAMSTKComboBox(simple=True)
self.txtFRMultiplier: RAMSTKEntry = RAMSTKEntry()
self.txtDecimalPlaces: RAMSTKEntry = RAMSTKEntry()
self.txtMissionTime: RAMSTKEntry = RAMSTKEntry()
# Initialize private dict instance attributes.
# Initialize private list instance attributes.
# Initialize private scalar instance attributes.
self._configuration: RAMSTKUserConfiguration = RAMSTKUserConfiguration()
# Initialize public dict instance attributes.
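# Illustrative annotation (not in the original source): each value in
# dic_attribute_widget_map below appears to follow the RAMSTKFixedPanel
# convention of [attribute position, widget, signal name, callback,
# edit message (empty here), default value, widget property dict,
# label text, GObject type string]; treat the field names as a best guess.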
self.dic_attribute_widget_map: Dict[str, List[Any]] = {
"module_book_tab_pos": [
0,
self.cmbModuleBookTabPosition,
"changed",
self._on_changed_combo,
"",
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Module Book Tab Position:"),
"gchararray",
],
"work_book_tab_pos": [
1,
self.cmbWorkBookTabPosition,
"changed",
self._on_changed_combo,
"",
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Work Book Tab Position:"),
"gchararray",
],
"list_book_tab_pos": [
2,
self.cmbListBookTabPosition,
"changed",
self._on_changed_combo,
"",
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("List Book Tab Position:"),
"gchararray",
],
"report_size": [
3,
self.cmbReportSize,
"changed",
super().on_changed_combo,
"",
"Letter",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Report Paper Size:"),
"gchararray",
],
"fr_multiplier": [
4,
self.txtFRMultiplier,
"changed",
super().on_changed_entry,
"",
6,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
"width": 75,
},
_("Failure Rate Multiplier:"),
"gfloat",
],
"decimals": [
5,
self.txtDecimalPlaces,
"changed",
super().on_changed_entry,
"",
3,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
"width": 75,
},
_("Decimal Places:"),
"gint",
],
"mission_time": [
6,
self.txtMissionTime,
"changed",
super().on_changed_entry,
"",
1.0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Reliability Mission Time:"),
"gfloat",
],
"config_file_path": [
7,
self.btnConfDir,
"file-set",
self._do_select_path,
"",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"height": 30,
"fg_color": "#000000",
"select-action": Gtk.FileChooserAction.SELECT_FOLDER,
"visible": True,
},
_("Path to RAMSTK Configuration Files:"),
"gchararray",
],
"data_file_path": [
8,
self.btnDataDir,
"file-set",
self._do_select_path,
"",
1,
{
"bg_color": "#FFFFFF",
"editable": True,
"height": 30,
"fg_color": "#000000",
"select-action": Gtk.FileChooserAction.SELECT_FOLDER,
"visible": True,
},
_("Path to RAMSTK Data Files:"),
"gchararray",
],
"icon_file_path": [
9,
self.btnIconDir,
"file-set",
self._do_select_path,
"",
1,
{
"bg_color": "#FFFFFF",
"editable": True,
"height": 30,
"fg_color": "#000000",
"select-action": Gtk.FileChooserAction.SELECT_FOLDER,
"visible": True,
},
_("Path to RAMSTK Icon Files:"),
"gchararray",
],
"log_file_path": [
10,
self.btnLogDir,
"file-set",
self._do_select_path,
"",
2,
{
"bg_color": "#FFFFFF",
"editable": True,
"height": 30,
"fg_color": "#000000",
"select-action": Gtk.FileChooserAction.SELECT_FOLDER,
"visible": True,
},
_("Path to RAMSTK Log Files:"),
"gchararray",
],
}
# Initialize public list instance attributes.
# Initialize public scalar instance attributes.
super().do_set_properties()
super().do_make_panel()
self._do_load_comboboxes()
super().do_set_callbacks()
# Subscribe to PyPubSub messages.
pub.subscribe(self._do_load_panel, "request_load_preferences")
def _do_load_panel(self, configuration: RAMSTKUserConfiguration) -> None:
"""Load the current preference values.
:return: None
:rtype: None
"""
_positions = {"bottom": 1, "left": 2, "right": 3, "top": 4}
_papersize = {"a4": 1, "letter": 2}
self._configuration = configuration
self.cmbModuleBookTabPosition.do_update(
_positions[self._configuration.RAMSTK_TABPOS["modulebook"].lower()],
signal="changed",
)
self.cmbWorkBookTabPosition.do_update(
_positions[self._configuration.RAMSTK_TABPOS["workbook"].lower()],
signal="changed",
)
self.cmbListBookTabPosition.do_update(
_positions[self._configuration.RAMSTK_TABPOS["listbook"].lower()],
signal="changed",
)
self.cmbReportSize.do_update(
_papersize[self._configuration.RAMSTK_REPORT_SIZE.lower()], signal="changed"
)
self.txtFRMultiplier.do_update(
str(self._configuration.RAMSTK_HR_MULTIPLIER), signal="changed"
)
self.txtDecimalPlaces.do_update(
str(self._configuration.RAMSTK_DEC_PLACES), signal="changed"
)
self.txtMissionTime.do_update(
str(self._configuration.RAMSTK_MTIME), signal="changed"
)
self.btnConfDir.set_current_folder(self._configuration.RAMSTK_CONF_DIR)
self.btnDataDir.set_current_folder(self._configuration.RAMSTK_DATA_DIR)
self.btnIconDir.set_current_folder(self._configuration.RAMSTK_ICON_DIR)
self.btnLogDir.set_current_folder(self._configuration.RAMSTK_LOG_DIR)
def _do_load_comboboxes(self) -> None:
"""Load the RAMSTKComboBoxes() with their data.
:return: None
:rtype: None
"""
self.cmbModuleBookTabPosition.do_load_combo(
[["Bottom"], ["Left"], ["Right"], ["Top"]]
)
self.cmbWorkBookTabPosition.do_load_combo(
[["Bottom"], ["Left"], ["Right"], ["Top"]]
)
self.cmbListBookTabPosition.do_load_combo(
[["Bottom"], ["Left"], ["Right"], ["Top"]]
)
self.cmbReportSize.do_load_combo([["A4"], ["Letter"]])
def _do_select_path(self, button: Gtk.FileChooserButton, index: int) -> None:
"""Select the path from the file chooser.
:param button: the Gtk.FileChooserButton() that called this method.
:param index: the index of the Gtk.FileChooserButton() that called
this method.
:return: None
:rtype: None
"""
if index == 0:
self._configuration.RAMSTK_CONF_DIR = button.get_current_folder()
elif index == 1:
self._configuration.RAMSTK_DATA_DIR = button.get_current_folder()
elif index == 2:
self._configuration.RAMSTK_ICON_DIR = button.get_current_folder()
elif index == 3:
self._configuration.RAMSTK_LOG_DIR = button.get_current_folder()
def _on_changed_combo(self, combo: RAMSTKComboBox, index: int) -> None:
"""Edit RAMSTKTreeView() layouts.
:param combo: the RAMSTKComboBox() that called this method.
:type combo: :class:`gui.gtk.RAMSTKComboBox`
:param index: the index in the signal handler list associated with
the RAMSTKComboBox() calling this method.
:return: None
:rtype: None
"""
combo.handler_block(combo.dic_handler_id["changed"])
if index == 1:
self._configuration.RAMSTK_TABPOS["modulebook"] = combo.get_value()
elif index == 2:
self._configuration.RAMSTK_TABPOS["workbook"] = combo.get_value()
elif index == 3:
self._configuration.RAMSTK_TABPOS["listbook"] = combo.get_value()
elif index == 4:
self._configuration.RAMSTK_REPORT_SIZE = combo.get_value()
combo.handler_unblock(combo.dic_handler_id["changed"])
class LookFeelPreferencesPanel(RAMSTKFixedPanel):
"""The panel to display options to be edited."""
# Define private dictionary class attributes.
# Define private list class attributes.
# Define private scalar class attributes.
_select_msg = "succeed_get_preferences_attributes"
_tag = "preferences"
_title = _("Look & Feel")
# Define public dictionary class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(self) -> None:
"""Initialize an instance of the Look and Feel panel."""
super().__init__()
# Initialize widgets.
self.btnRevisionBGColor = Gtk.ColorButton()
self.btnRevisionFGColor = Gtk.ColorButton()
self.btnFunctionBGColor = Gtk.ColorButton()
self.btnFunctionFGColor = Gtk.ColorButton()
self.btnRequirementsBGColor = Gtk.ColorButton()
self.btnRequirementsFGColor = Gtk.ColorButton()
self.btnHardwareBGColor = Gtk.ColorButton()
self.btnHardwareFGColor = Gtk.ColorButton()
self.btnValidationBGColor = Gtk.ColorButton()
self.btnValidationFGColor = Gtk.ColorButton()
# Initialize private dict instance attributes.
# Initialize private list instance attributes.
# Initialize private scalar instance attributes.
self._configuration: RAMSTKUserConfiguration = RAMSTKUserConfiguration()
# Initialize public dict instance attributes.
self.dic_attribute_widget_map: Dict[str, List[Any]] = {
"revisionbg": [
0,
self.btnRevisionBGColor,
"color-set",
self._do_set_color,
"",
"#FFFFFF",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"height": 30,
"visible": True,
},
_("Revision Tree Background Color:"),
],
"revisionfg": [
1,
self.btnRevisionFGColor,
"color-set",
self._do_set_color,
"",
"#000000",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"height": 30,
"visible": True,
},
_("Revision Tree Foreground Color:"),
],
"functionbg": [
2,
self.btnFunctionBGColor,
"color-set",
self._do_set_color,
"",
"#FFFFFF",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"height": 30,
"visible": True,
},
_("Function Tree Background Color:"),
],
"functionfg": [
3,
self.btnFunctionFGColor,
"color-set",
self._do_set_color,
"",
"#000000",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"height": 30,
"visible": True,
},
_("Function Tree Foreground Color:"),
],
"requirementbg": [
4,
self.btnRequirementsBGColor,
"color-set",
self._do_set_color,
"",
"#FFFFFF",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"height": 30,
"visible": True,
"width": 75,
},
_("Requirements Tree Background Color:"),
],
"requirementfg": [
5,
self.btnRequirementsFGColor,
"color-set",
self._do_set_color,
"",
"#000000",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"height": 30,
"visible": True,
"width": 75,
},
_("Requirements Tree Foreground Color:"),
],
"hardwarebg": [
6,
self.btnHardwareBGColor,
"color-set",
self._do_set_color,
"",
"#FFFFFF",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"height": 30,
"visible": True,
},
_("Hardware Tree Background Color:"),
],
"hardwarefg": [
7,
self.btnHardwareFGColor,
"color-set",
self._do_set_color,
"",
"#000000",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"height": 30,
"visible": True,
},
_("Hardware Tree Foreground Color:"),
],
"validationbg": [
8,
self.btnValidationBGColor,
"color-set",
self._do_set_color,
"",
"#FFFFFF",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"height": 30,
"visible": True,
},
_("Validation Tree Background Color:"),
],
"validationfg": [
9,
self.btnValidationFGColor,
"color-set",
self._do_set_color,
"",
"#000000",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"height": 30,
"visible": True,
},
_("Validation Tree Foreground Color:"),
],
}
# Initialize public list instance attributes.
# Initialize public scalar instance attributes.
self._do_set_properties()
super().do_make_panel()
self._do_set_callbacks()
# Subscribe to PyPubSub messages.
pub.subscribe(self._do_load_panel, "request_load_preferences")
def _do_load_panel(self, configuration: RAMSTKUserConfiguration) -> None:
"""Load the current preference values.
:return: None
:rtype: None
"""
self._configuration = configuration
self.btnRevisionBGColor.set_color(
Gdk.color_parse(self._configuration.RAMSTK_COLORS["revisionbg"])
)
self.btnRevisionFGColor.set_color(
Gdk.color_parse(self._configuration.RAMSTK_COLORS["revisionfg"])
)
self.btnFunctionBGColor.set_color(
Gdk.color_parse(self._configuration.RAMSTK_COLORS["functionbg"])
)
self.btnFunctionFGColor.set_color(
Gdk.color_parse(self._configuration.RAMSTK_COLORS["functionfg"])
)
self.btnRequirementsBGColor.set_color(
Gdk.color_parse(self._configuration.RAMSTK_COLORS["requirementbg"])
)
self.btnRequirementsFGColor.set_color(
Gdk.color_parse(self._configuration.RAMSTK_COLORS["requirementfg"])
)
self.btnHardwareBGColor.set_color(
Gdk.color_parse(self._configuration.RAMSTK_COLORS["hardwarebg"])
)
self.btnHardwareFGColor.set_color(
Gdk.color_parse(self._configuration.RAMSTK_COLORS["hardwarefg"])
)
self.btnValidationBGColor.set_color(
Gdk.color_parse(self._configuration.RAMSTK_COLORS["validationbg"])
)
self.btnValidationFGColor.set_color(
Gdk.color_parse(self._configuration.RAMSTK_COLORS["validationfg"])
)
def _do_set_color(self, colorbutton: Gtk.ColorButton, ramstk_color: str) -> None:
"""Set the selected color.
:param colorbutton: the Gtk.ColorButton() that called this method.
:param ramstk_color: the name of the color to set.
:return: None
:rtype: None
"""
# Retrieve the six-digit hexadecimal version of the selected color.
_color = colorbutton.get_color()
try:
_red = f"{int(_color.red / 255):#02}"
except ValueError:
_red = f"{int(_color.red / 255)}"
try:
_green = f"{int(_color.green / 255):#02}"
except ValueError:
_green = f"{int(_color.green / 255)}"
try:
_blue = f"{int(_color.blue / 255):#02}"
except ValueError:
_blue = f"{int(_color.blue / 255)}"
_color = f"#{_red}{_green}{_blue}"
# Set the color variable.
self._configuration.RAMSTK_COLORS[ramstk_color] = _color
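# Note (not part of the original RAMSTK code, added as a hedged sketch): Gdk.Color
# channels are 16-bit values (0-65535), so an alternative way to build the hex
# string is two-digit hex formatting per channel, for example:
#     _red = f"{_color.red // 256:02x}"      # 65535 -> "ff"
#     _green = f"{_color.green // 256:02x}"
#     _blue = f"{_color.blue // 256:02x}"
#     _color = f"#{_red}{_green}{_blue}"     # e.g. "#ff8000"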
def _do_set_callbacks(self) -> None:
"""Set the callback functions/methods for each of the widgets.
:return: None
:rtype: None
"""
# ----- BUTTONS
self.btnRevisionBGColor.connect("color-set", self._do_set_color, "revisionbg")
self.btnRevisionFGColor.connect("color-set", self._do_set_color, "revisionfg")
self.btnFunctionBGColor.connect("color-set", self._do_set_color, "functionbg")
self.btnFunctionFGColor.connect("color-set", self._do_set_color, "functionfg")
self.btnRequirementsBGColor.connect(
"color-set", self._do_set_color, "requirementbg"
)
self.btnRequirementsFGColor.connect(
"color-set", self._do_set_color, "requirementfg"
)
self.btnHardwareBGColor.connect("color-set", self._do_set_color, "hardwarebg")
self.btnHardwareFGColor.connect("color-set", self._do_set_color, "hardwarefg")
self.btnValidationBGColor.connect(
"color-set", self._do_set_color, "validationbg"
)
self.btnValidationFGColor.connect(
"color-set", self._do_set_color, "validationfg"
)
def _do_set_properties(self) -> None:
"""Set the properties of the Preferences assistance widgets.
:return: None
:rtype: None
"""
# ----- BUTTONS
self.btnRevisionBGColor.height = 30
self.btnRevisionFGColor.height = 30
self.btnFunctionBGColor.height = 30
self.btnFunctionFGColor.height = 30
self.btnRequirementsBGColor.height = 30
self.btnRequirementsFGColor.height = 30
self.btnHardwareBGColor.height = 30
self.btnHardwareFGColor.height = 30
self.btnValidationBGColor.height = 30
self.btnValidationFGColor.height = 30
class TreeLayoutPreferencesPanel(RAMSTKTreePanel):
"""The panel to display options to be edited."""
# Define private dictionary class attributes.
# Define private list class attributes.
# Define private scalar class attributes.
_select_msg = None
_tag = "preferences"
_title = _("Tree View Layout")
# Define public dictionary class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(self) -> None:
"""Initialize an instance of the RAMSTKTreeView Layout panel."""
super().__init__()
# Initialize widgets.
self.cmbFormatFiles: RAMSTKComboBox = RAMSTKComboBox(simple=False)
self.tvwTreeView: RAMSTKTreeView = RAMSTKTreeView()
# Initialize private dict instance attributes.
# Initialize private list instance attributes.
self._lst_labels: List[str] = [_("Select format file to edit:")]
# Initialize private scalar instance attributes.
self._configuration: RAMSTKUserConfiguration = RAMSTKUserConfiguration()
# Initialize public dict instance attributes.
# Initialize public list instance attributes.
# Initialize public scalar instance attributes.
self.fmt_file: str = ""
# Make a fixed type panel.
self._do_load_comboboxes()
self.__do_make_treeview()
self.__make_ui()
self.cmbFormatFiles.do_set_properties(
tooltip=_("Select the Tree View layout to edit.")
)
self.cmbFormatFiles.dic_handler_id["changed"] = self.cmbFormatFiles.connect(
"changed", self._on_combo_changed
)
# Subscribe to PyPubSub messages.
pub.subscribe(self._do_load_panel, "request_load_preferences")
def _do_load_comboboxes(self) -> None:
"""Load the RAMSTKComboBoxes() with their data.
:return: None
:rtype: None
"""
self.cmbFormatFiles.do_load_combo(
[
[_("Allocation"), "allocation", ""],
[_("Failure Definition"), "failure_definition", ""],
[_("(D)FME(C)A"), "fmea", ""],
[_("Function"), "function", ""],
[_("Hardware"), "hardware", ""],
[_("Hazards Analysis"), "hazard", ""],
[_("Physics of Failure Analysis"), "pof", ""],
[_("Requirements"), "requirement", ""],
[_("Revisions"), "revision", ""],
[_("Similar Item Analysis"), "similar_item", ""],
[_("Stakeholder Inputs"), "stakeholder", ""],
[_("Usage Profile"), "usage_profile", ""],
[_("Validation"), "validation", ""],
],
simple=False,
)
def _do_load_format(self, module: str) -> None:
"""Load the selected Module View format file for editing.
:param module: the name of the RAMSTK workstream module whose
Module View layout is to be edited.
:return: None
:rtype: None
"""
self.fmt_file = (
self._configuration.RAMSTK_CONF_DIR
+ "/layouts/"
+ self._configuration.RAMSTK_FORMAT_FILE[module]
)
_format = toml.load(self.fmt_file)
_datatypes = _format["datatype"]
_defaulttitle = _format["defaulttitle"]
_editable = _format["editable"]
_usertitle = _format["usertitle"]
_keys = _format["key"]
_position = _format["position"]
_visible = _format["visible"]
_widgets = _format["widget"]
_model = self.tvwTreeView.get_model()
_model.clear()
for _key in _defaulttitle:
_data = [
_defaulttitle[_key],
_usertitle[_key],
int(_position[_key]),
string_to_boolean(_editable[_key]),
string_to_boolean(_visible[_key]),
_datatypes[_key],
_widgets[_key],
_keys[_key],
_key,
]
_model.append(None, _data)
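# Hedged illustration (the per-column keys below are an assumption; only the
# section names come from the toml.load() keys read above). A layout file is
# expected to look roughly like:
#     [defaulttitle]
#     col0 = "Revision ID"
#     [usertitle]
#     col0 = "Revision ID"
#     [position]
#     col0 = "0"
#     [editable]
#     col0 = "False"
#     [visible]
#     col0 = "True"
#     [datatype]
#     col0 = "gchararray"
#     [widget]
#     col0 = "text"
#     [key]
#     col0 = "revision_id"
# Each column key contributes one row to the editable Gtk.TreeModel filled above.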
def _do_load_panel(self, configuration: RAMSTKUserConfiguration) -> None:
"""Load the current preference values.
:return: None
:rtype: None
"""
self._configuration = configuration
def _on_combo_changed(self, combo: RAMSTKComboBox) -> None:
"""Edit RAMSTKTreeView() layouts.
:param combo: the RAMSTKComboBox() that called this method.
:return: None
:rtype: None
"""
combo.handler_block(combo.dic_handler_id["changed"])
_model = combo.get_model()
_row = combo.get_active_iter()
_module = _model.get_value(_row, 1)
self._do_load_format(_module)
combo.handler_unblock(combo.dic_handler_id["changed"])
def __do_make_treeview(self) -> None:
"""Make the format file editing Gtk.Treeview().
:return: None
:rtype: None
"""
self.tvwTreeView.position = {
"col0": 0,
"col1": 1,
"col2": 2,
"col3": 3,
"col4": 4,
"col5": 5,
"col6": 6,
"col7": 7,
"col8": 8,
}
self.tvwTreeView.headings = {
"col0": _("Default\nTitle"),
"col1": _("User\nTitle"),
"col2": _("Column\nPosition"),
"col3": _("Can\nEdit?"),
"col4": _("Is\nVisible?"),
"col5": "",
"col6": "",
"col7": "",
"col8": "",
}
self.tvwTreeView.editable = {
"col0": "False",
"col1": "True",
"col2": "True",
"col3": "True",
"col4": "True",
"col5": "False",
"col6": "False",
"col7": "False",
"col8": "False",
}
self.tvwTreeView.visible = {
"col0": "True",
"col1": "True",
"col2": "True",
"col3": "True",
"col4": "True",
"col5": "False",
"col6": "False",
"col7": "False",
"col8": "False",
}
self.tvwTreeView.datatypes = {
"col0": "gchararray",
"col1": "gchararray",
"col2": "gint",
"col3": "gint",
"col4": "gint",
"col5": "gchararray",
"col6": "gchararray",
"col7": "gchararray",
"col8": "gchararray",
}
self.tvwTreeView.korder = {
"col0": "default_title",
"col1": "user_title",
"col2": "column_position",
"col3": "can_edit",
"col4": "is_visible",
"col5": "unk1",
"col6": "unk2",
"col7": "unk3",
"col8": "unk4",
}
self.tvwTreeView.widgets = {
"default_title": Gtk.CellRendererText(),
"user_title": Gtk.CellRendererText(),
"column_position": Gtk.CellRendererText(),
"can_edit": Gtk.CellRendererToggle(),
"is_visible": Gtk.CellRendererToggle(),
"unk1": Gtk.CellRendererText(),
"unk2": Gtk.CellRendererText(),
"unk3": Gtk.CellRendererText(),
"unk4": Gtk.CellRendererText(),
}
self.tvwTreeView.do_make_model()
self.tvwTreeView.do_make_columns()
self.tvwTreeView.do_set_editable_columns(self.tvwTreeView.do_edit_cell)
def __make_ui(self) -> None:
"""Build the UI for the Preferences assistant."""
super().do_make_panel()
_scrollwindow = self.get_child()
self.remove(_scrollwindow)
_label = RAMSTKLabel(self._lst_labels[0])
_x_pos = _label.get_attribute("width")
_fixed: Gtk.Fixed = Gtk.Fixed()
_fixed.put(_label, 5, 5)
_fixed.put(self.cmbFormatFiles, _x_pos + 10, 5)
_vbox = Gtk.VBox()
_vbox.pack_start(_fixed, False, False, 0)
_vbox.pack_end(_scrollwindow, True, True, 0)
self.add(_vbox)
|
py | b4063a644030e1c617bcb245ecf1f549b11fb370 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""Main file for performing activity control operations
Activity Control is the only class defined in this file.
ActivityControl: Class for managing Activity Control enable/disable
for various entities within the commcell.
ActivityControl:
__init__(commcell_object) -- initialise object of Class associated to the commcell
__repr__() -- String representation of the instance of this class.
set() -- method to set activity control.
enable_after_delay() -- method to disable activity control and set a delay time.
_get_activity_control_status() -- method to get activity control status
is_enabled() -- boolean specifying if a given activity is enabled or not
**reEnableTime** -- returns the Enable back time
**reEnableTimeZone** -- returns the Enable back time zone
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from .exception import SDKException
class ActivityControl(object):
"""Class for performing activity control operations."""
def __init__(self, commcell_object):
"""Initialise the Activity control class instance.
Args:
commcell_object (object) -- instance of the Commcell class
Returns:
object - instance of the ActivityControl class
"""
self._commcell_object = commcell_object
self._activity_type_dict = {
"ALL ACTIVITY": 128,
"DATA MANAGEMENT": 1,
"DATA RECOVERY": 2,
"DATA AGING": 16,
"AUX COPY": 4,
"DATA VERIFICATION": 8192,
"DDB ACTIVITY": 512,
"SCHEDULER": 256,
"OFFLINE CONTENT INDEXING": 1024,
}
self._get_activity_control_status()
def __repr__(self):
"""String representation of the instance of this class."""
representation_string = 'ActivityControl class instance'
return representation_string
def _request_json_(self, activity_type, enable_time):
"""Returns the JSON request to pass to the API
as per the options selected by the user.
Returns:
dict - JSON request to pass to the API
"""
request_json = {
"commCellInfo": {
"commCellActivityControlInfo": {
"activityControlOptions": [
{
"activityType": self._activity_type_dict[activity_type],
"enableAfterADelay": True,
"enableActivityType": False,
"dateTime": {
"time": enable_time}}]}}}
return request_json
def set(self, activity_type, action):
"""Sets activity control on Commcell.
Args:
activity_type (str) -- Activity Type to be Enabled or Disabled
Values:
"ALL ACTIVITY",
"DATA MANAGEMENT",
"DATA RECOVERY",
"DATA AGING",
"AUX COPY",
"DATA VERIFICATION",
"DDB ACTIVITY",
"SCHEDULER",
"OFFLINE CONTENT INDEXING",
action (str) -- Enable or Disable
Values:
Enable
Disable
Raises:
SDKException:
if failed to set
if response is empty
if response is not success
"""
set_request = self._commcell_object._services['SET_ACTIVITY_CONTROL'] % (
str(self._activity_type_dict[activity_type]), str(action))
flag, response = self._commcell_object._cvpysdk_object.make_request(
'POST', set_request
)
if flag:
if response.json():
error_code = str(response.json()['errorCode'])
if error_code == '0':
self._get_activity_control_status()
return
else:
raise SDKException(
'CVPySDK', '102', response.json()['errorMessage'])
else:
raise SDKException('Response', '102')
else:
response_string = self._commcell_object._update_response_(
response.text)
raise SDKException('Response', '101', response_string)
def enable_after_delay(self, activity_type, enable_time):
"""Disables activity if not already disabled
and enables at the time specified.
Args:
activity_type (str) -- Activity Type to be Enabled or Disabled
Values:
"ALL ACTIVITY",
"DATA MANAGEMENT",
"DATA RECOVERY",
"DATA AGING",
"AUX COPY",
"DATA VERIFICATION",
"DDB ACTIVITY",
"SCHEDULER",
"OFFLINE CONTENT INDEXING",
enable_time (str)-- Unix Timestamp in UTC timezone
Raises:
SDKException:
if failed to enable activity control after a time
if response is empty
if response is not success
"""
request_json = self._request_json_(activity_type, enable_time)
set_request = self._commcell_object._services['SET_COMMCELL_PROPERTIES']
flag, response = self._commcell_object._cvpysdk_object.make_request(
'PUT', set_request, request_json
)
if flag:
if response.json() and 'response' in response.json():
error_code = response.json()['response'][0]['errorCode']
if error_code == 0:
self._get_activity_control_status()
return
elif 'errorMessage' in response.json()['response'][0]:
error_message = response.json(
)['response'][0]['errorMessage']
o_str = 'Failed to enable activity control after a delay\nError: "{0}"'.format(error_message)
raise SDKException('CVPySDK', '102', o_str)
else:
raise SDKException('Response', '102')
else:
response_string = self._commcell_object._update_response_(
response.text)
raise SDKException('Response', '101', response_string)
def _get_activity_control_status(self):
"""Gets the activity control status
Raises:
SDKException:
if response is empty
if response is not success
"""
get_request = self._commcell_object._services['GET_ACTIVITY_CONTROL']
flag, response = self._commcell_object._cvpysdk_object.make_request(
'GET', get_request
)
if flag:
if response.json() and 'acObjects' in response.json():
self._activity_control_properties_list = response.json()[
'acObjects']
else:
raise SDKException('Response', '102')
else:
response_string = self._commcell_object._update_response_(
response.text)
raise SDKException('Response', '101', response_string)
def is_enabled(self, activity_type):
"""Returns True/False based on the enabled flag and also sets
other relevant properties for a given activity type.
Args:
activity_type (str) -- Activity Type to be Enabled or Disabled
Values:
"ALL ACTIVITY",
"DATA MANAGEMENT",
"DATA RECOVERY",
"DATA AGING",
"AUX COPY",
"DATA VERIFICATION",
"DDB ACTIVITY",
"SCHEDULER",
"OFFLINE CONTENT INDEXING",
"""
self._get_activity_control_status()
for each_activity in self._activity_control_properties_list:
if int(each_activity['activityType']) == \
self._activity_type_dict[activity_type]:
self._reEnableTime = each_activity['reEnableTime']
self._noSchedEnable = each_activity['noSchedEnable']
self._reenableTimeZone = each_activity['reenableTimeZone']
return each_activity['enabled']
o_str = 'Failed to find activity type:"{0}" in the response'.format(
activity_type)
raise SDKException('Client', '102', o_str)
@property
def reEnableTime(self):
"""Treats the reEnableTime as a read-only attribute."""
return self._reEnableTime
@property
def reEnableTimeZone(self):
"""Treats the reEnableTimeZone as a read-only attribute."""
return self._reenableTimeZone
|
py | b4063c0efb3a61923558b28fbf52dde333cd696b | from __future__ import unicode_literals
import json
from math import ceil
import frappe
from shipment_management.provider_fedex import get_fedex_packages_rate
from shipment_management.utils import get_country_code
@frappe.whitelist()
def get_rates_for_doc(doc, address=None, address_obj=None):
doc = json.loads(doc)
from frappe.contacts.doctype.address.address import get_address_display
if not address_obj:
to_address = frappe.get_doc("Address", address or doc.get("shipping_address_name"))
frappe.local.response["address"] = get_address_display(to_address.as_dict())
else:
to_address = json.loads(address_obj)
frappe.local.response["address"] = get_address_display(to_address)
from_address = frappe.get_doc("Address", {"is_your_company_address" : 1})
return get_rates(from_address, to_address, doc=doc)
def get_rates(from_address, to_address, items=None, doc=None, packaging_type="YOUR_PACKAGING"):
"""Simple wrapper over fedex rating service.
It takes the standard address field values for the from_ and to_ addresses
to keep a consistent address api.
"""
# quick hack to package all items into one box for quick shipping quotations
# packages = find_packages(items)
packages = []
package = {
"weight_value": 0,
"weight_units": "LB",
"physical_packaging": "BOX",
"group_package_count": 0,
"insured_amount": 0
}
item_values = frappe.get_all("Item", fields=["insured_declared_value", "name", "net_weight"])
item_values = {elem.pop("name"): elem for elem in item_values}
if doc and not items:
items = doc.get("items")
# Set the item weights, quantity and insured amounts in the package(s).
# For repairs, only process packages once for each warranty claim.
processed_claims = []
weight_value = group_package_count = insured_amount = 0
for item in items:
if item.get("warranty_claim") and item.get("warranty_claim") not in processed_claims:
repair_items = frappe.db.get_value("Warranty Claim", item.get("warranty_claim"), ["item_code", "cable", "case"])
repair_items = list(filter(None, repair_items))
group_package_count = len(repair_items)
for repair_item in repair_items:
weight_value += item_values.get(repair_item, {}).get("net_weight", 0)
insured_amount += item_values.get(repair_item, {}).get("insured_declared_value", 0)
processed_claims.append(item.get("warranty_claim"))
else:
group_package_count += item.get("qty", 0)
weight_value += item_values.get(item.get("item_code"), {}).get("net_weight", 0) * item.get("qty", 0)
insured_amount += item_values.get(item.get("item_code"), {}).get("insured_declared_value", 0) * item.get("qty", 0)
package["weight_value"] = max(1, ceil(weight_value))
package["group_package_count"] = group_package_count
package["insured_amount"] = insured_amount
packages.append(package)
# to try and keep some form of standardization we'll minimally require
# a weight_value. Any other values will be passed as is to the rates service.
surcharge = 0
for package in packages:
if package.get("weight_value") is None or package.get("weight_units") is None:
raise frappe.exceptions.ValidationError("Missing weight_value data")
# if not package.get("group_package_count"):
# keep count on 1 as we don't care about package groups
package["group_package_count"] = 1
if not package.get("insured_amount"):
package["insured_amount"] = 0
if not package.get("physical_packaging"):
package["physical_packaging"] = "BOX"
surcharge = surcharge + package.get("surcharge", 0)
# check item conditions for applying Fedex One Rate pricing
rate_settings = frappe.get_single("Shipment Rate Settings")
RecipientCountryCode = get_country_code(to_address.get("country", ""))
flat_rate = False
signature_option = "DIRECT"
packaging = packaging_type
if RecipientCountryCode.lower() == "us": # One Rate only applies for intra-US deliveries
flat_rate_items = {item.item: item.max_qty for item in rate_settings.items}
for item in items:
if item.get("qty", 0) < flat_rate_items.get(item.get("item_code"), 0):
flat_rate = True
signature_option = None
packaging = frappe.db.get_value("Shipment Rate Item Settings", {"item": item.get("item_code")}, "packaging")
packaging = frappe.db.get_value("Shipping Package", packaging, "box_code")
else:
flat_rate = False
signature_option = "DIRECT"
packaging = packaging_type
break
# form rate request arguments
rate_exceptions = []
args = dict(
DropoffType='REGULAR_PICKUP',
PackagingType=packaging,
EdtRequestType='NONE',
PaymentType='SENDER',
# Shipper
ShipperPostalCode=(from_address.get("pincode") or "").strip(),
ShipperCountryCode=get_country_code(from_address.get("country")),
# Recipient
RecipientPostalCode=(to_address.get("pincode") or "").strip(),
IsResidential=to_address.get("is_residential"),
RecipientCountryCode=RecipientCountryCode,
# Delivery options
package_list=packages,
ignoreErrors=True,
signature_option=signature_option,
exceptions=rate_exceptions,
delivery_date=doc.get("delivery_date", "") if doc else "",
saturday_delivery=doc.get("saturday_delivery", "") if doc else "",
flat_rate=flat_rate
)
if to_address:
rates = get_fedex_packages_rate(**args) or []
# since we're working on v18 of Fedex's rate service, which is incompatible with
# getting One Rate and non-One Rate prices in the same query, we do another query
# to get the non-One Rate prices and update the existing rates
if flat_rate:
non_flat_rate_args = args.copy()
non_flat_rate_args.update({"flat_rate": False, "signature_option": "DIRECT", "PackagingType": packaging_type})
flat_rates = get_fedex_packages_rate(**non_flat_rate_args) or []
rates.extend(flat_rates)
else:
rates = []
if rates:
sorted_rates = []
unique_labels = []
for rate in sorted(rates, key=lambda rate: rate["fee"]):
# remove duplicate shipping methods
if rate["label"] in unique_labels:
continue
# disallow FEDEX GROUND for Canada
if RecipientCountryCode.lower() == "ca" and rate['label'] == "FEDEX GROUND":
continue
unique_labels.append(rate["label"])
rate["fee"] += surcharge
if rate_settings.upcharge_type == "Percentage":
rate["fee"] += (rate["fee"] * (rate_settings.upcharge / 100))
elif rate_settings.upcharge_type == "Actual":
rate["fee"] += rate_settings.upcharge
rate['fee'] = round(rate['fee'], 2)
sorted_rates.append(rate)
return sorted_rates
else:
msg = "Could not get rates, please check your Shipping Address"
if len(rate_exceptions) > 0:
for ex in rate_exceptions:
if ex["type"] == "request":
msg = str(ex["exception"])
break
frappe.throw(msg, title="Error")
|
py | b4063d5436973c03257acd98fb06275018a06e9b | # For UniBorg
# By Priyam Kalra
# Syntax (.hl <link>)
from telethon import events
from uniborg.util import admin_cmd
from telethon.tl import functions, types
@borg.on(admin_cmd(pattern="hl ?(.*)"))
async def _(event):
if event.fwd_from:
return
input = event.pattern_match.group(1)
await event.edit("[ㅤㅤㅤㅤㅤㅤㅤ](" + input + ")")
|
py | b4063ec14dd5a7aa99a6da7c872d3568f8b54c71 | import torch
from PIL import Image
from torchvision import transforms
from mnist import LeNet
import os
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
transform = transforms.ToTensor()
classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
def get_files(directory):
return [os.path.join(directory, f) for f in sorted(list(os.listdir(directory)))
if os.path.isfile(os.path.join(directory, f))]
def predictdir(img_dir):
net = LeNet()
net.load_state_dict(torch.load('./model.pkl', map_location=device))
net.to(device)  # run the model on the same device as the input tensors
net.eval()
torch.set_grad_enabled(False)  # a bare torch.no_grad() call has no effect; disable gradients explicitly
file = get_files(img_dir)
for i, img_path in enumerate(file):
img = Image.open(img_path)
img = transform(img).unsqueeze(0)
img_ = img.to(device)
outputs = net(img_)
_, predicted = torch.max(outputs, 1)
print(img_path, classes[predicted[0]])
if __name__ == '__main__':
dir_path = './data/MNIST/mnist_train/1'
predictdir(dir_path) |
py | b4063f73770a4d512ecd66017fab87158b72339d | # Reto propuesto por el curso que estoy siguiendo de Structuralia (https://sando.aulapharos.com/).
# Tema2 INPUT – OUTPUT Y VARIABLES
'''
Debes hacer un script que pregunte al usuario por dos números y los almacene (en diferentes variables).
A continuación el programa deberá mostrarnos varias operaciones comentando primero qué operaciones ha realizado.
Al menos debe realizar cinco operaciones con los números introducidos.
Los números introducidos pueden condicionar las operaciones por lo que se debe avisar de qué números no son válidos
por ejemplo, no podemos realizar la raíz cuadrada de un número negativo o no se puede dividir entre cero.
'''
repetir = True #Variable para controlar el bucle y repetir mientras los datos introducido no sean numeros.
while repetir == True: #Comprobamos que la variable se correcta.
try:#intentamos guardar en numero1 y numero2 float si no es posible la conversion da un fallo.
numero1 = float(input("Dime un numero1: ")) #Solicitamos al usuario un numero y lo traformamos en un float.
numero2 = float(input("Dime un numero2: "))
repetir = False #si llega aqui todo va bien y el codigo continua.
except:# en caso de que falle indicamos que tiene que introducir 2 numeros.
print("Tiene que introducir 2 numeros")
print("Numero1: "+str(numero1))
print("Numero2: "+str(numero2))
# We are asked to perform several operations with these numbers.
# Addition
numero = numero1 + numero2
print(str(numero1)+" + "+str(numero2)+" = "+str(numero)+" --> SUMA")
# Subtraction
numero = numero1 - numero2
print(str(numero1)+" - "+str(numero2)+" = "+str(numero)+" --> RESTA")
# Multiplication
numero = numero1 * numero2
print(str(numero1)+" * "+str(numero2)+" = "+str(numero)+" --> MULTIPLICACION")
# Power
try:
numero = numero1 ** numero2
print(str(numero1)+" ^ "+str(numero2)+" = "+str(numero)+" --> POTENCIA")
except:
print(str(numero1)+" ^ "+str(numero2)+" No crees que te has pasado.")
# Division
try:
numero = numero1 / numero2
print(str(numero1)+" / "+str(numero2)+" = "+str(numero)+" --> DIVISIÓN")
except:
print("Intentado dividir por 0 bribón")
# Integer part of a division
try:
numero = numero1 // numero2
print(str(numero1)+" // "+str(numero2)+" = "+str(numero)+" --> PARTE ENTERA")
except:
print("Intentado dividir por 0 bribón")
# Remainder of a division
try:
numero = numero1 % numero2
print(str(numero1)+" % "+str(numero2)+" = "+str(numero)+" --> RESTO")
except:
print("Intentado dividir por 0 bribón") |
py | b40640649c10c0745f2fe740b65249a4725c074f | # -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Fredkin gate. Controlled-SWAP.
Author: Andrew Cross
"""
from qiskit import QuantumCircuit
from qiskit import CompositeGate
from qiskit.extensions.standard import header
from qiskit.extensions.standard import cx, ccx
class FredkinGate(CompositeGate):
"""Fredkin gate."""
def __init__(self, ctl, tgt1, tgt2, circ=None):
"""Create new Fredkin gate."""
super(FredkinGate, self).__init__("fredkin", [], [ctl, tgt1, tgt2],
circ)
self.cx(tgt2, tgt1)
self.ccx(ctl, tgt1, tgt2)
self.cx(tgt2, tgt1)
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.cswap(self.arg[0], self.arg[1], self.arg[2]))
def cswap(self, ctl, tgt1, tgt2):
"""Apply Fredkin to circuit."""
self._check_qubit(ctl)
self._check_qubit(tgt1)
self._check_qubit(tgt2)
self._check_dups([ctl, tgt1, tgt2])
return self._attach(FredkinGate(ctl, tgt1, tgt2, self))
QuantumCircuit.cswap = cswap
CompositeGate.cswap = cswap
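# Hedged usage sketch (the register/circuit construction follows the 2017-era
# qiskit API this file targets and is an assumption; only cswap() is defined above):
#     q = QuantumRegister("q", 3)
#     circ = QuantumCircuit(q)
#     circ.cswap(q[0], q[1], q[2])   # swap q[1] and q[2] when q[0] is |1>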
|
py | b40640c420f86efd19ca5de02751343ee9f9ec97 | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ros_gazebo_bot"
PROJECT_SPACE_DIR = "/home/pranshul/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
|
py | b40640e257263ab568d06c8f36ce248fef21b3f7 | # ----------------------------- #
# Copyright 2020 Ambra Di Piano #
# ----------------------------- # -------------------------------------------------- #
# Redistribution and use in source and binary forms, with or without modification, #
# are permitted provided that the following conditions are met: #
# 1. Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# 2. Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation and/or #
# other materials provided with the distribution. #
# 3. Neither the name of the copyright holder nor the names of its contributors #
# may be used to endorse or promote products derived from this software without #
# specific prior written permission. #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE #
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED #
# OF THE POSSIBILITY OF SUCH DAMAGE. #
# ---------------------------------------------------------------------------------- #
# ======================================================= #
# TESTING cssrcdetect FOR CTA-RTA PERFORMANCE EVALUATIONS #
# ======================================================= #
# IMPORTS ---!
from pkg_blindsearch import *
# from module_plot import *
import numpy as np
import csv
import os
import time
# --------------------------------- SETUP --------------------------------- !!!
# compact initialisation ---!
trials = 1 # trials
count = 0 # starting count
# cpus ---!
nthreads = 2
os.environ['OPENBLAS_NUM_THREADS'] = str(nthreads)
os.environ['MKL_NUM_THREADS'] = str(nthreads)
# ctools/cscripts parameters ---!
caldb = 'prod3b-v2'
# caldb_degraded = caldb.replace('prod', 'degr')
irf = 'South_z40_0.5h'
sigma = 5 # detection acceptance (Gaussian)
texp = (10, 100) # exposure times (s)
tdelay = 50 # slewing time (s)
tmax = []
for i in range(len(texp)):
tmax.append(tdelay + texp[i])
ttotal = 600 # 1e4 # maximum tobs (4h at least) simulation total time (s)
add_hours = 10 # 7200 # +2h observation time added after first non-detection (s)
run_duration = 600 # 1200 # 20min observation run time for LST in RTA (s) ---!
elow = 0.03 # simulation minimum energy (TeV)
ehigh = 150.0 # simulation maximum energy (TeV)
emin = 0.03 # selection minimum energy (TeV)
emax = 150.0 # selection maximum energy (TeV)
roi = 5 # region of interest for simulation and selection (deg)
wbin = 0.02 # skymap bin width (deg)
corr_rad = 0.1 # Gaussian
max_src = 5 # max candidates
ts_threshold = 25 # TS threshold for reliable detection
highest_ts = ts_threshold # minimum threshold to repoint (repoint will be for monotonically higher ts only)
reduce_flux = None # flux will be divided by factor reduce_flux, if nominal then set to None
# conditions control ---!
checks1 = True # prints info
checks2 = False # prints more info
if_ebl = True # uses the EBL absorbed template
if_cut = False # adds a cut-off parameter to the source model
ebl_fits = False # generate the EBL absorbed template
extract_spec = True # generates spectral tables and obs definition models
irf_degrade = False # use degraded irf
compute_degr = False # compute irf degradation
src_sort = True # sorts scandidates from highest TS to lowest
use_runs = True # if True uses phlists of run_duration otherwise uese the template format
repoint = False # repoint to source coords after positive detection
skip_exist = False # skips the step if ID exists in csv
debug = False # prints logfiles on terminal
if_log = True # saves logfiles
# path configuration ---!
cfg = xmlConfig()
p = ConfigureXml(cfg)
# files ---!
targets = ['run0367_ID000227']
merger_maps = [p.getMergersDir() + target.replace('_ID', '_MergerID') + '_skymap.fits' for target in targets]
# recap and dof ---!
dof, m2, m1 = getDof()
print('!!! *** !!! dof = ', m2, ' - ', m1, ' = ', dof)
print('!!! *** !!! MODEL CUTOFF:', if_cut)
print('!!! *** !!! IRF DEGRADATION:', irf_degrade)
print('!!! *** !!! nominal caldb:', caldb)
print('!!! *** !!! irf:', irf)
print('!!! *** !!! TS SORT:', src_sort)
print('!!! *** !!! selection energy range: [', emin, ', ', emax, '] (TeV)')
print('!!! *** !!! roi:', roi, ' (deg)')
print('!!! *** !!! blind detection confidence:', sigma, ' sigmas')
print('!!! *** !!! detection confidence ts threshold:', ts_threshold)
print('!!! *** !!! total observation time:', ttotal, ' s')
print('!!! *** !!! additional observation time:', add_hours, ' s')
print('!!! *** !!! delay time:', tdelay, ' s\n')
if use_runs:
print('handle data in runs of', run_duration, 's\n')
else:
print('handle data with template structure\n')
del dof, m1, m2
# --------------------------------- INITIALIZE --------------------------------- !!!
count += 1
for idx, target in enumerate(targets):
p.setRunDir(target)
print('------------------------------------------\n\nTarget:', target)
if irf_degrade:
phlist = p.getObsDir() + 'deg%06d.fits' % count
else:
phlist = p.getObsDir() + 'ebl%06d.fits' % count
# pointing with off-axis equal to max prob GW ---!
pointing = getPointingAlert(merger_map=merger_maps[idx])
# pointing = [round(pointing[0], 3), round(pointing[1], 3)]
print('Pointing coordinates:', pointing)
# setup trials obj ---!
observation = Analysis()
observation.nthreads = nthreads
observation.pointing = pointing
observation.roi = roi
# degrade IRF if required ---!
observation.caldb = caldb
observation.irf = irf
if irf_degrade:
if compute_degr:
observation.degradeIrf()
observation.caldb = caldb.replace('prod', 'degr')
# --------------------------------- 1° LOOP :: trials --------------------------------- !!!
count += 1
observation.seed = count
clocking = tdelay - min(texp) # simulate flowing time (subsequent temporal windows of 1s)
GTIf = [run_duration for i in range(len(texp))] # LST runs are of 20mins chunks ---!
num = [1 for i in range(len(texp))] # count on LST-like run chunks ---!
print('\n\n!!! ************ STARTING TRIAL %d ************ !!!\n\n' % count) if checks1 else None
print('!!! check ---- seed=', observation.seed) if checks2 else None
# attach ID to fileroot ---!
f = target + 'ebl%06d' % count
# if irf_degrade:
# f += 'irf'
print('!!! check ---- obs:', target) if checks2 else None
# --------------------------------- 2° LOOP :: tbins --------------------------------- !!!
observation.e = [emin, emax]
twindows = [int((ttotal - tdelay) / texp[i]) for i in
range(len(texp))] # number of temporal windows per exposure time in total time ---!
tlast = [ttotal + tmax[i] for i in
range(len(texp))] # maximum observation time from last detection (not exceeding ttotal) ---!
for i, t in enumerate(tlast):
if t > ttotal:
tlast[i] = ttotal
is_detection = [True for i in
range(len(texp))] # controls which avoid forwarding of tlast for subsequent non-detections ---!
# looping through all light-curve time intervals ---!
for j in range(int(max(twindows))):
clocking += min(texp) # passing time second by second ---!
print(clocking, 'clock', tlast, is_detection) if checks1 else None
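# Worked example of the clock (following the settings at the top of the script):
# with texp = (10, 100) and tdelay = 50, clocking starts at 50 - 10 = 40 and the
# first pass brings it to 50. On that first pass (j == 0) both exposures are
# analysed; afterwards a texp window is re-analysed only when clocking % texp == 0,
# i.e. every 10 s for the short window and every 100 s for the long one.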
# --------------------------------- CLOCKING BREAK --------------------------------- !!!
# check tlast, if globally reached then stop current trial ---!
if clocking >= max(tlast):
print('end analysis trial', count, ' at clocking', tlast)
break
current_twindows = []
# --------------------------------- 3° LOOP :: texp in tbin --------------------------------- !!!
for i in range(len(texp)):
if j == 0:
current_twindows.append(texp[i])
else:
current_twindows.append(texp[i]) if clocking % texp[i] == 0 else None
# looping for all the texp for which the tbin analysis needs to be computed ---!
for i in range(len(current_twindows)):
# --------------------------------- CLOCKING SKIP --------------------------------- !!!
# check tlast, if locally reached then skip current bin ---!
index = texp.index(current_twindows[i])
if clocking > tlast[index]:
print('skip analysis texp', texp[index]) if checks1 else None
continue
# --------------------------------- CHECK SKIP --------------------------------- !!!
tbin = clocking / current_twindows[i] # temporal bin number of this analysis
IDbin = 'tbin%09d' % tbin
csv_name = p.getCsvDir() + 'tesi_tdel%d_deg%s_%ds.csv' % (tdelay, str(irf_degrade), texp[index])
if os.path.isfile(csv_name) and skip_exist:
skip = checkTrialId(csv_name, IDbin)
else:
skip = False
if skip_exist is True and skip is True:
continue
# --------------------------------- SELECTION TIME --------------------------------- !!!
# if first tbin of tepx then don't add clocking time to selection edges ---!
if clocking < tdelay:
continue
elif clocking == tdelay:
observation.t = [tdelay, tmax[index]]
elif clocking > tdelay and texp[index] == min(texp):
observation.t = [clocking, texp[index] + clocking]
elif clocking > tdelay and texp[index] != min(texp):
observation.t = [tdelay + clocking, tmax[index] + clocking]
if observation.t[1] > ttotal:
observation.t[1] = ttotal
# --------------------------------- OBSERVATION LIST --------------------------------- !!!
event_list = p.getObsDir() + 'obs_' + target + '.xml'
if os.path.isfile(event_list):
os.remove(event_list)
observation.input = phlist
observation.output = event_list
observation.obsList(obsname='run0406_ID000126')
print('phlist:', phlist) if checks2 else None
print('observation list:', event_list) if checks2 else None
# --------------------------------- SELECTION --------------------------------- !!!
event_selected = event_list.replace(p.getObsDir(), p.getSelectDir()).replace('obs_', 'texp%ds_' % texp[i])
prefix = p.getSelectDir() + 'texp%ds_' % texp[i]
# select events ---!
if os.path.isfile(event_selected):
os.remove(event_selected)
observation.input = event_list
observation.output = event_selected
observation.eventSelect(prefix=prefix)
print('selection', observation.output) if checks2 else None
# --------------------------------- SKYMAP --------------------------------- !!!
skymap = event_selected.replace(p.getSelectDir(), p.getDetDir()).replace('.xml', '_skymap.fits')
if os.path.isfile(skymap):
os.remove(skymap)
observation.input = event_selected
observation.output = skymap
observation.eventSkymap(wbin=wbin)
print('skymap', observation.output) if checks2 else None
# --------------------------------- DETECTION & MODELING --------------------------------- !!!
observation.corr_rad = corr_rad
observation.max_src = max_src
detectionXml = skymap.replace('_skymap.fits', '_det%dsgm.xml' % sigma)
if os.path.isfile(detectionXml):
os.remove(detectionXml)
observation.input = skymap
observation.output = detectionXml
observation.runDetection()
# showSkymap(file=skymap, reg=detectionXml.replace('.xml', '.reg'), show=False)
print('detection', observation.output) if checks2 else None
deobservation = ManageXml(detectionXml)
deobservation.sigma = sigma
deobservation.if_cut = if_cut
deobservation.modXml()
deobservation.prmsFreeFix()
# --------------------------------- CANDIDATES NUMBER --------------------------------- !!!
pos = [deobservation.loadRaDec()]
Ndet = len(pos[0][0])
# --------------------------------- MAX LIKELIHOOD --------------------------------- !!!
start_time = time.time() if checks1 else None
likeXml = detectionXml.replace('_det%dsgm' % observation.sigma, '_like%dsgm' % observation.sigma)
if os.path.isfile(likeXml):
os.remove(likeXml)
observation.input = event_selected
observation.model = detectionXml
observation.output = likeXml
observation.maxLikelihood()
if checks1:
end_time = time.time() - start_time
print(end_time, 's with texp=%d s' % texp[i])
print('likelihood', observation.output)
likeObj = ManageXml(likeXml)
if src_sort and Ndet > 0:
highest_ts_src = likeObj.sortSrcTs()[0]
print('!!! check ---- highest TS: ', highest_ts_src) if checks1 else None
else:
highest_ts_src = None
# --------------------------------- DETECTION RA & DEC --------------------------------- !!!
pos, ra_det, dec_det = ([] for n in range(3))
pos.append(deobservation.loadRaDec(highest=highest_ts_src))
ra_det.append(pos[0][0][0]) if len(pos[0][0]) > 0 else ra_det.append(np.nan)
dec_det.append(pos[0][1][0]) if len(pos[0][0]) > 0 else dec_det.append(np.nan)
Ndet = len(pos[0][0])
# --------------------------------- CLOSE DET XML --------------------------------- !!!
deobservation.closeXml()
del deobservation
# --------------------------------- BEST FIT TSV --------------------------------- !!!
ts_list, ts = ([] for n in range(2))
ts_list.append(likeObj.loadTs()) if Ndet > 0 else ts_list.append([np.nan])
# only first elem ---!
ts.append(ts_list[0][0])
print('ts:', ts[0]) if checks1 else None
# --------------------------------- Nsrc FOR TSV THRESHOLD --------------------------------- !!!
# count sources with TS >= ts_threshold
n = 0
for k in range(len(ts_list[0])):
if float(ts_list[0][k]) >= ts_threshold:
n += 1
Nsrc = n
# --------------------------------- REPOINTING ---------------------------------- !!!
# if positive detection has been achieved, use source coordinates instead of the original pointing
if repoint:
if float(ts[0]) > highest_ts:
pointing = (ra_det[0], dec_det[0])
highest_ts = float(ts[0])
print('repointing to', pointing, 'with TS:', ts[0])
# --------------------------------- +2h FROM LAST DETECTION --------------------------------- !!!
# if no detection set False and defined end of observation ---!
if (float(ts[0]) < ts_threshold or str(ts[0]) == 'nan') and is_detection[index]:
is_detection[index] = False
# add 2hrs of obs time ---!
tlast[index] = observation.t[1] + add_hours # +2h ---!
print('+2h; tlast = ', tlast[index], ' with texp = ', texp[index], 'at clocking', clocking)
# only 4hrs of simulation available, if tlast exceeds them then reset to ttotal ---!
if tlast[index] > ttotal:
tlast[index] = ttotal
print('reset tlast = ', tlast[index], ' with texp = ', texp[index])
# if later detection then reset True ---!
elif float(ts[0]) >= ts_threshold and not is_detection[index]:
is_detection[index] = True
# --------------------------------- BEST FIT RA & DEC --------------------------------- !!!
ra_list, ra_fit, dec_list, dec_fit = ([] for n in range(4))
coord = likeObj.loadRaDec() if Ndet > 0 else None
ra_list.append(coord[0]) if Ndet > 0 else ra_list.append([np.nan])
dec_list.append(coord[1]) if Ndet > 0 else dec_list.append([np.nan])
# only first elem ---!
ra_fit.append(ra_list[0][0])
dec_fit.append(dec_list[0][0])
# --------------------------------- BEST FIT SPECTRAL --------------------------------- !!!
pref_list, pref, index_list, index, pivot_list, pivot = ([] for n in range(6))
pref_err_list, pref_err = ([] for n in range(2))
likeObj.if_cut = if_cut
spectral = likeObj.loadSpectral()
index_list.append(spectral[0]) if Ndet > 0 else index_list.append([np.nan])
pref_list.append(spectral[1]) if Ndet > 0 else pref_list.append([np.nan])
pivot_list.append(spectral[2]) if Ndet > 0 else pivot_list.append([np.nan])
error = likeObj.loadPrefError()
pref_err_list.append(error) if Ndet > 0 else pref_err_list.append([np.nan])
# only first elem ---!
index.append(index_list[0][0])
pref.append(pref_list[0][0])
pivot.append(pivot_list[0][0])
pref_err.append(pref_err_list[0][0])
# cut-off parameter, if requested ---!
if if_cut:
cutoff_list, cutoff = ([] for n in range(2))
cutoff_list.append(spectral[3]) if Ndet > 0 else cutoff_list.append([np.nan])
cutoff.append(cutoff_list[0][0])
# --------------------------------- INTEGRATED FLUX --------------------------------- !!!
flux_ph, flux_ph_err = ([] for n in range(2))
if Ndet > 0:
flux_ph.append(observation.photonFluxPowerLaw(index[0], pref[0], pivot[0])) # E (MeV)
flux_ph_err.append(observation.photonFluxPowerLaw(index[0], pref_err[0], pivot[0])) # E (MeV)
else:
flux_ph.append(np.nan)
flux_ph_err.append(np.nan)
# MISSING THE CUT-OFF OPTION ---!!!
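# Hedged note on the quantity computed above (the exact convention inside
# pkg_blindsearch is an assumption): for a power law dN/dE = pref * (E/pivot)**index
# with index != -1, the photon flux integrated between emin and emax is
#     pref * pivot / (index + 1) * ((emax/pivot)**(index + 1) - (emin/pivot)**(index + 1))
# in photons / cm^2 / s, with pref the differential flux at the pivot energy (E in MeV).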
# --------------------------------- CLOSE LIKE XML --------------------------------- !!!
likeObj.closeXml()
del likeObj
# --------------------------------- RESULTS TABLE (csv) --------------------------------- !!!
header = '#tbin,tinit,tend,Ndet,Nsrc,RA_det,DEC_det,RA_fit,DEC_fit,flux_ph,flux_ph_err,TS\n'
ID = 'ID%06d' % count
IDbin = 'tbin%09d' % tbin
row = []
if checks1:
print('!!! ---------- check trial:', count)
print('!!! ----- check texp:', texp[i], 's between: [', observation.t[0], ', ', observation.t[1],
' ] s')
print('!!! *** check Ndet:', Ndet)
print('!!! *** check Nsrc:', Nsrc)
print('!!! *** check ra_det:', ra_det[0])
print('!!! *** check dec_det:', dec_det[0])
print('!!! *** check ra_fit:', ra_fit[0])
print('!!! *** check dec_fit:', dec_fit[0])
print('!!! *** check flux_ph:', flux_ph[0])
print('!!! *** check flux_ph_err:', flux_ph_err[0])
print('!!! *** check ts:', ts[0])
print('!!! *** ---------------------------')
row.append(
[IDbin, observation.t[0], observation.t[1], Ndet, Nsrc, ra_det[0], dec_det[0], ra_fit[0], dec_fit[0],
flux_ph[0], flux_ph_err[0], ts[0]])
if os.path.isfile(csv_name):
with open(csv_name, 'a') as csv_file:
w = csv.writer(csv_file)
w.writerows(row)
csv_file.close()
else:
with open(csv_name, 'w+') as csv_file:
csv_file.write(header)
w = csv.writer(csv_file)
w.writerows(row)
csv_file.close()
# --------------------------------- CLEAR SPACE --------------------------------- !!!
os.system('rm ' + p.getSelectDir() + '*ebl%06d*' % count)
os.system('rm ' + p.getDetDir() + '*ebl%06d*' % count)
del observation
print('\n\n!!! ================== END ================== !!!\n\n')
|
py | b406410c8110bca6a6e143ad64ac5c74160511d4 | from distutils.core import setup
setup(
name='colorDetection',
version='',
packages=[''],
url='',
license='',
author='yoonheeh',
author_email='',
description=''
)
|
py | b406411af3542daf9853622100e08ce1af8316eb | import json
import os
import random
import shlex
import shutil
import sys
import textwrap
import threading
import time
import uuid
from collections import OrderedDict
from contextlib import contextmanager
import bottle
import requests
from mock import Mock
from requests.exceptions import HTTPError
from six.moves.urllib.parse import urlsplit, urlunsplit
from webtest.app import TestApp
from conans import load
from conans.cli.cli import Cli
from conans.client.api.conan_api import ConanAPIV2
from conans.client.cache.cache import ClientCache
from conans.client.cache.remote_registry import Remotes
from conans.client.command import Command
from conans.client.conan_api import Conan
from conans.client.rest.file_uploader import IterableToFileAdapter
from conans.client.runner import ConanRunner
from conans.client.tools import environment_append
from conans.client.tools.files import replace_in_file
from conans.errors import NotFoundException, RecipeNotFoundException, PackageNotFoundException
from conans.model.manifest import FileTreeManifest
from conans.model.profile import Profile
from conans.model.ref import ConanFileReference, PackageReference
from conans.model.settings import Settings
from conans.server.revision_list import _RevisionEntry
from cpt.test.assets import copy_assets
from cpt.test.assets.genconanfile import GenConanfile
from cpt.test.utils.mocks import MockedUserIO, TestBufferConanOutput
from cpt.test.utils.scm import create_local_git_repo, create_local_svn_checkout, \
create_remote_svn_repo
from cpt.test.utils.server_launcher import (TESTING_REMOTE_PRIVATE_PASS,
TESTING_REMOTE_PRIVATE_USER,
TestServerLauncher)
from cpt.test.utils.test_files import temp_folder
from conans.util.conan_v2_mode import CONAN_V2_MODE_ENVVAR
from conans.util.env_reader import get_env
from conans.util.files import mkdir, save_files
NO_SETTINGS_PACKAGE_ID = "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9"
ARTIFACTORY_DEFAULT_USER = os.getenv("ARTIFACTORY_DEFAULT_USER", "admin")
ARTIFACTORY_DEFAULT_PASSWORD = os.getenv("ARTIFACTORY_DEFAULT_PASSWORD", "password")
ARTIFACTORY_DEFAULT_URL = os.getenv("ARTIFACTORY_DEFAULT_URL", "http://localhost:8090/artifactory")
def inc_recipe_manifest_timestamp(cache, reference, inc_time):
ref = ConanFileReference.loads(reference)
path = cache.package_layout(ref).export()
manifest = FileTreeManifest.load(path)
manifest.time += inc_time
manifest.save(path)
def inc_package_manifest_timestamp(cache, package_reference, inc_time):
pref = PackageReference.loads(package_reference)
path = cache.package_layout(pref.ref).package(pref)
manifest = FileTreeManifest.load(path)
manifest.time += inc_time
manifest.save(path)
def test_profile(profile=None, settings=None):
if profile is None:
profile = Profile()
if profile.processed_settings is None:
profile.processed_settings = settings or Settings()
return profile
class TestingResponse(object):
"""Wraps a response from TestApp external tool
to guarantee the presence of response.ok, response.content
and response.status_code, as it was a requests library object.
Is instanced by TestRequester on each request"""
def __init__(self, test_response):
self.test_response = test_response
def close(self):
pass # Compatibility with close() method of a requests when stream=True
@property
def headers(self):
return self.test_response.headers
@property
def ok(self):
return self.test_response.status_code == 200
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s' % (self.status_code, self.content)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s' % (self.status_code, self.content)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
@property
def content(self):
return self.test_response.body
@property
def charset(self):
return self.test_response.charset
@charset.setter
def charset(self, newcharset):
self.test_response.charset = newcharset
@property
def text(self):
return self.test_response.text
def iter_content(self, chunk_size=1): # @UnusedVariable
return [self.content]
@property
def status_code(self):
return self.test_response.status_code
def json(self):
try:
return json.loads(self.test_response.content)
except:
raise ValueError("The response is not a JSON")
class TestRequester(object):
"""Fake requests module calling server applications
with TestApp"""
def __init__(self, test_servers):
self.test_servers = test_servers
@staticmethod
def _get_url_path(url):
# Remove schema from url
_, _, path, query, _ = urlsplit(url)
url = urlunsplit(("", "", path, query, ""))
return url
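# e.g. "http://fakehost/v1/ping?check=1" -> "/v1/ping?check=1" (scheme and host
# are stripped so the path can be dispatched to the in-process TestApp; the host
# name here is only illustrative).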
def _get_wsgi_app(self, url):
for test_server in self.test_servers.values():
if url.startswith(test_server.fake_url):
return test_server.app
raise Exception("Testing error: Not remote found")
def get(self, url, **kwargs):
app, url = self._prepare_call(url, kwargs)
if app:
response = app.get(url, **kwargs)
return TestingResponse(response)
else:
return requests.get(url, **kwargs)
def put(self, url, **kwargs):
app, url = self._prepare_call(url, kwargs)
if app:
response = app.put(url, **kwargs)
return TestingResponse(response)
else:
return requests.put(url, **kwargs)
def delete(self, url, **kwargs):
app, url = self._prepare_call(url, kwargs)
if app:
response = app.delete(url, **kwargs)
return TestingResponse(response)
else:
return requests.delete(url, **kwargs)
def post(self, url, **kwargs):
app, url = self._prepare_call(url, kwargs)
if app:
response = app.post(url, **kwargs)
return TestingResponse(response)
else:
return requests.post(url, **kwargs)
def _prepare_call(self, url, kwargs):
if not url.startswith("http://fake"): # Call to S3 (or external), perform a real request
return None, url
app = self._get_wsgi_app(url)
url = self._get_url_path(url) # Remove http://server.com
self._set_auth_headers(kwargs)
if app:
kwargs["expect_errors"] = True
kwargs.pop("stream", None)
kwargs.pop("verify", None)
kwargs.pop("auth", None)
kwargs.pop("cert", None)
kwargs.pop("timeout", None)
if "data" in kwargs:
if isinstance(kwargs["data"], IterableToFileAdapter):
data_accum = b""
for tmp in kwargs["data"]:
data_accum += tmp
kwargs["data"] = data_accum
kwargs["params"] = kwargs["data"]
del kwargs["data"] # Parameter in test app is called "params"
if kwargs.get("json"):
# json is a high level parameter of requests, not a generic one
# translate it to data and content_type
kwargs["params"] = json.dumps(kwargs["json"])
kwargs["content_type"] = "application/json"
kwargs.pop("json", None)
return app, url
@staticmethod
def _set_auth_headers(kwargs):
if kwargs.get("auth"):
mock_request = Mock()
mock_request.headers = {}
kwargs["auth"](mock_request)
if "headers" not in kwargs:
kwargs["headers"] = {}
kwargs["headers"].update(mock_request.headers)
class ArtifactoryServerStore(object):
def __init__(self, repo_url, user, password):
self._user = user or ARTIFACTORY_DEFAULT_USER
self._password = password or ARTIFACTORY_DEFAULT_PASSWORD
self._repo_url = repo_url
@property
def _auth(self):
return self._user, self._password
@staticmethod
def _root_recipe(ref):
return "{}/{}/{}/{}".format(ref.user, ref.name, ref.version, ref.channel)
@staticmethod
def _ref_index(ref):
return "{}/index.json".format(ArtifactoryServerStore._root_recipe(ref))
@staticmethod
def _pref_index(pref):
tmp = ArtifactoryServerStore._root_recipe(pref.ref)
return "{}/{}/package/{}/index.json".format(tmp, pref.ref.revision, pref.id)
def get_recipe_revisions(self, ref):
        time.sleep(0.1)  # Index appears not to be updated immediately after a remove
url = "{}/{}".format(self._repo_url, self._ref_index(ref))
response = requests.get(url, auth=self._auth)
response.raise_for_status()
the_json = response.json()
if not the_json["revisions"]:
raise RecipeNotFoundException(ref)
tmp = [_RevisionEntry(i["revision"], i["time"]) for i in the_json["revisions"]]
return tmp
def get_package_revisions(self, pref):
        time.sleep(0.1)  # Index appears not to be updated immediately
url = "{}/{}".format(self._repo_url, self._pref_index(pref))
response = requests.get(url, auth=self._auth)
response.raise_for_status()
the_json = response.json()
if not the_json["revisions"]:
raise PackageNotFoundException(pref)
tmp = [_RevisionEntry(i["revision"], i["time"]) for i in the_json["revisions"]]
return tmp
def get_last_revision(self, ref):
revisions = self.get_recipe_revisions(ref)
return revisions[0]
def get_last_package_revision(self, ref):
revisions = self.get_package_revisions(ref)
return revisions[0]
    def package_exists(self, pref):
        try:
            revisions = self.get_package_revisions(pref)
            if pref.revision:
                return any(r.revision == pref.revision for r in revisions)
            return True
        except Exception:  # When resolving the latest revision and there is no package
            return False
class ArtifactoryServer(object):
def __init__(self, *args, **kwargs):
self._user = ARTIFACTORY_DEFAULT_USER
self._password = ARTIFACTORY_DEFAULT_PASSWORD
self._url = ARTIFACTORY_DEFAULT_URL
self._repo_name = "conan_{}".format(str(uuid.uuid4()).replace("-", ""))
self.create_repository()
self.server_store = ArtifactoryServerStore(self.repo_url, self._user, self._password)
@property
def _auth(self):
return self._user, self._password
@property
def repo_url(self):
return "{}/{}".format(self._url, self._repo_name)
@property
def repo_api_url(self):
return "{}/api/conan/{}".format(self._url, self._repo_name)
def recipe_revision_time(self, ref):
revs = self.server_store.get_recipe_revisions(ref)
for r in revs:
if r.revision == ref.revision:
return r.time
return None
def package_revision_time(self, pref):
revs = self.server_store.get_package_revisions(pref)
for r in revs:
if r.revision == pref.revision:
return r.time
return None
def create_repository(self):
url = "{}/api/repositories/{}".format(self._url, self._repo_name)
config = {"key": self._repo_name, "rclass": "local", "packageType": "conan"}
ret = requests.put(url, auth=self._auth, json=config)
ret.raise_for_status()
def package_exists(self, pref):
try:
revisions = self.server_store.get_package_revisions(pref)
if pref.revision:
for r in revisions:
if pref.revision == r.revision:
return True
return False
return True
        except Exception:  # When resolving the latest revision and there is no package
return False
def recipe_exists(self, ref):
try:
revisions = self.server_store.get_recipe_revisions(ref)
if ref.revision:
for r in revisions:
if ref.revision == r.revision:
return True
return False
return True
        except Exception:  # When resolving the latest revision and there is no package
return False
class TestServer(object):
def __init__(self, read_permissions=None,
write_permissions=None, users=None, plugins=None, base_path=None,
server_capabilities=None, complete_urls=False):
"""
        'read_permissions' and 'write_permissions' are lists of:
[("opencv/2.3.4@lasote/testing", "user1, user2")]
'users': {username: plain-text-passwd}
"""
# Unique identifier for this server, will be used by TestRequester
        # to determine where to call. Why? remote_manager just assigns a url
        # to the rest_client, so the rest_client doesn't know about object instances,
        # just urls, so the testing framework maps fake urls to instances
if read_permissions is None:
read_permissions = [("*/*@*/*", "*")]
if write_permissions is None:
write_permissions = []
if users is None:
users = {"lasote": "mypass", "conan": "password"}
self.fake_url = "http://fake%s.com" % str(uuid.uuid4()).replace("-", "")
base_url = "%s/v1" % self.fake_url if complete_urls else "v1"
self.test_server = TestServerLauncher(base_path, read_permissions,
write_permissions, users,
base_url=base_url,
plugins=plugins,
server_capabilities=server_capabilities)
self.app = TestApp(self.test_server.ra.root_app)
@property
def server_store(self):
return self.test_server.server_store
def __repr__(self):
return "TestServer @ " + self.fake_url
def __str__(self):
return self.fake_url
def recipe_exists(self, ref):
try:
if not ref.revision:
path = self.test_server.server_store.conan_revisions_root(ref)
else:
path = self.test_server.server_store.base_folder(ref)
return self.test_server.server_store.path_exists(path)
        except NotFoundException:  # When resolving the latest revision and there is no package
return False
def package_exists(self, pref):
try:
if pref.revision:
path = self.test_server.server_store.package(pref)
else:
path = self.test_server.server_store.package_revisions_root(pref)
return self.test_server.server_store.path_exists(path)
        except NotFoundException:  # When resolving the latest revision and there is no package
return False
def latest_recipe(self, ref):
rev, _ = self.test_server.server_store.get_last_revision(ref)
return ref.copy_with_rev(rev)
def recipe_revision_time(self, ref):
if not ref.revision:
raise Exception("Pass a ref with revision (Testing framework)")
return self.test_server.server_store.get_revision_time(ref)
def latest_package(self, pref):
if not pref.ref.revision:
raise Exception("Pass a pref with .rev.revision (Testing framework)")
prev = self.test_server.server_store.get_last_package_revision(pref)
return pref.copy_with_revs(pref.ref.revision, prev)
def package_revision_time(self, pref):
        if not pref.revision:
raise Exception("Pass a pref with revision (Testing framework)")
tmp = self.test_server.server_store.get_package_revision_time(pref)
return tmp
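# Illustrative sketch: building an in-process TestServer, using the permission and
# user formats documented in __init__ (the reference and user names are only examples).
def _example_test_server():  # pragma: no cover - illustrative only
    return TestServer(read_permissions=[("*/*@*/*", "*")],
                      write_permissions=[("opencv/2.3.4@lasote/testing", "lasote")],
                      users={"lasote": "mypass"})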
if get_env("CONAN_TEST_WITH_ARTIFACTORY", False):
TestServer = ArtifactoryServer
def _copy_cache_folder(target_folder):
    # Some variables affect cache population (they take a different default folder)
vars = [CONAN_V2_MODE_ENVVAR, 'CC', 'CXX', 'PATH']
cache_key = hash('|'.join(map(str, [os.environ.get(it, None) for it in vars])))
master_folder = _copy_cache_folder.master.setdefault(cache_key, temp_folder(create_dir=False))
if not os.path.exists(master_folder):
# Create and populate the cache folder with the defaults
cache = ClientCache(master_folder, TestBufferConanOutput())
cache.initialize_config()
cache.registry.initialize_remotes()
cache.initialize_default_profile()
cache.initialize_settings()
shutil.copytree(master_folder, target_folder)
_copy_cache_folder.master = dict() # temp_folder(create_dir=False)
@contextmanager
def redirect_output(target):
original_stdout = sys.stdout
original_stderr = sys.stderr
    # TODO: change in 2.0
    # redirecting both of them to the same target for the moment
    # to assign to TestClient.out
sys.stdout = target
sys.stderr = target
try:
yield
finally:
sys.stdout = original_stdout
sys.stderr = original_stderr
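# Illustrative sketch: capturing the output produced inside a block with redirect_output
# (the io.StringIO sink below is just an example target).
def _example_redirect_output():  # pragma: no cover - illustrative only
    import io
    sink = io.StringIO()
    with redirect_output(sink):
        print("captured")
    return sink.getvalue()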
class TestClient(object):
""" Test wrap of the conans application to launch tests in the same way as
in command line
"""
def __init__(self, cache_folder=None, current_folder=None, servers=None, users=None,
requester_class=None, runner=None, path_with_spaces=True,
revisions_enabled=None, cpu_count=1, default_server_user=None,
cache_autopopulate=True):
"""
current_folder: Current execution folder
servers: dict of {remote_name: TestServer}
        users: dict of {remote_name: [(user, password), ...]} for automatic input,
        in order, if required ==> {"default": [("lasote", "mypass"), ("other", "otherpass")]}
"""
if default_server_user is not None:
if servers is not None:
raise Exception("Cannot define both 'servers' and 'default_server_user'")
if users is not None:
raise Exception("Cannot define both 'users' and 'default_server_user'")
if default_server_user is True:
server_users = {"user": "password"}
users = {"default": [("user", "password")]}
else:
server_users = default_server_user
users = {"default": list(default_server_user.items())}
# Allow write permissions to users
server = TestServer(users=server_users, write_permissions=[("*/*@*/*", "*")])
servers = {"default": server}
self.users = users
if self.users is None:
self.users = {"default": [(TESTING_REMOTE_PRIVATE_USER, TESTING_REMOTE_PRIVATE_PASS)]}
if cache_autopopulate and (not cache_folder or not os.path.exists(cache_folder)):
# Copy a cache folder already populated
self.cache_folder = cache_folder or temp_folder(path_with_spaces, create_dir=False)
_copy_cache_folder(self.cache_folder)
else:
self.cache_folder = cache_folder or temp_folder(path_with_spaces)
self.requester_class = requester_class
self.runner = runner
if servers and len(servers) > 1 and not isinstance(servers, OrderedDict):
raise Exception(textwrap.dedent("""
Testing framework error: Servers should be an OrderedDict. e.g:
servers = OrderedDict()
servers["r1"] = server
servers["r2"] = TestServer()
"""))
self.servers = servers or {}
if servers is not False: # Do not mess with registry remotes
self.update_servers()
self.current_folder = current_folder or temp_folder(path_with_spaces)
# Once the client is ready, modify the configuration
mkdir(self.current_folder)
self.tune_conan_conf(cache_folder, cpu_count, revisions_enabled)
def load(self, filename):
return load(os.path.join(self.current_folder, filename))
@property
def cache(self):
# Returns a temporary cache object intended for inspecting it
return ClientCache(self.cache_folder, TestBufferConanOutput())
@property
def base_folder(self):
# Temporary hack to refactor ConanApp with less changes
return self.cache_folder
@property
def storage_folder(self):
return self.cache.store
@property
def requester(self):
api = self.get_conan_api()
api.create_app()
return api.app.requester
@property
def proxy(self):
api = self.get_conan_api()
api.create_app()
return api.app.proxy
@property
def _http_requester(self):
# Check if servers are real
real_servers = any(isinstance(s, (str, ArtifactoryServer))
for s in self.servers.values())
if not real_servers:
if self.requester_class:
return self.requester_class(self.servers)
else:
return TestRequester(self.servers)
def _set_revisions(self, value):
value = "1" if value else "0"
self.run("config set general.revisions_enabled={}".format(value))
def enable_revisions(self):
self._set_revisions(True)
assert self.cache.config.revisions_enabled
def disable_revisions(self):
self._set_revisions(False)
assert not self.cache.config.revisions_enabled
def tune_conan_conf(self, cache_folder, cpu_count, revisions_enabled):
# Create the default
cache = self.cache
_ = cache.config
if cpu_count:
replace_in_file(cache.conan_conf_path,
"# cpu_count = 1", "cpu_count = %s" % cpu_count,
output=TestBufferConanOutput(), strict=not bool(cache_folder))
if revisions_enabled is not None:
self._set_revisions(revisions_enabled)
elif "TESTING_REVISIONS_ENABLED" in os.environ:
value = get_env("TESTING_REVISIONS_ENABLED", True)
self._set_revisions(value)
def update_servers(self):
cache = self.cache
Remotes().save(cache.remotes_path)
registry = cache.registry
for name, server in self.servers.items():
if isinstance(server, ArtifactoryServer):
registry.add(name, server.repo_api_url)
self.users.update({name: [(ARTIFACTORY_DEFAULT_USER,
ARTIFACTORY_DEFAULT_PASSWORD)]})
elif isinstance(server, TestServer):
registry.add(name, server.fake_url)
else:
registry.add(name, server)
@contextmanager
def chdir(self, newdir):
old_dir = self.current_folder
if not os.path.isabs(newdir):
newdir = os.path.join(old_dir, newdir)
mkdir(newdir)
self.current_folder = newdir
try:
yield
finally:
self.current_folder = old_dir
def get_conan_api_v2(self):
user_io = MockedUserIO(self.users, out=sys.stderr)
conan = ConanAPIV2(cache_folder=self.cache_folder, quiet=False, user_io=user_io,
http_requester=self._http_requester, runner=self.runner)
return conan
def get_conan_api_v1(self, user_io=None):
if user_io:
self.out = user_io.out
else:
self.out = TestBufferConanOutput()
user_io = user_io or MockedUserIO(self.users, out=self.out)
conan = Conan(cache_folder=self.cache_folder, output=self.out, user_io=user_io,
http_requester=self._http_requester, runner=self.runner)
return conan
def get_conan_api(self, user_io=None):
if os.getenv("CONAN_V2_CLI"):
return self.get_conan_api_v2()
else:
return self.get_conan_api_v1(user_io)
def run_cli(self, command_line, user_io=None, assert_error=False):
conan = self.get_conan_api(user_io)
self.api = conan
if os.getenv("CONAN_V2_CLI"):
command = Cli(conan)
else:
command = Command(conan)
args = shlex.split(command_line)
current_dir = os.getcwd()
os.chdir(self.current_folder)
old_path = sys.path[:]
old_modules = list(sys.modules.keys())
try:
error = command.run(args)
finally:
sys.path = old_path
os.chdir(current_dir)
# Reset sys.modules to its prev state. A .copy() DOES NOT WORK
added_modules = set(sys.modules).difference(old_modules)
for added in added_modules:
sys.modules.pop(added, None)
self._handle_cli_result(command_line, assert_error=assert_error, error=error)
return error
def run(self, command_line, user_io=None, assert_error=False):
""" run a single command as in the command line.
If user or password is filled, user_io will be mocked to return this
tuple if required
"""
# TODO: remove in 2.0
if os.getenv("CONAN_V2_CLI"):
from cpt.test.utils.mocks import RedirectedTestOutput
self.out = RedirectedTestOutput()
with redirect_output(self.out):
error = self.run_cli(command_line, user_io=user_io, assert_error=assert_error)
else:
error = self.run_cli(command_line, user_io=user_io, assert_error=assert_error)
return error
def run_command(self, command, cwd=None, assert_error=False):
output = TestBufferConanOutput()
self.out = output
runner = ConanRunner(output=output)
ret = runner(command, cwd=cwd or self.current_folder)
self._handle_cli_result(command, assert_error=assert_error, error=ret)
return ret
def _handle_cli_result(self, command, assert_error, error):
if (assert_error and not error) or (not assert_error and error):
if assert_error:
msg = " Command succeeded (failure expected): "
else:
msg = " Command failed (unexpectedly): "
exc_message = "\n{header}\n{cmd}\n{output_header}\n{output}\n{output_footer}\n".format(
header='{:-^80}'.format(msg),
output_header='{:-^80}'.format(" Output: "),
output_footer='-' * 80,
cmd=command,
output=self.out
)
raise Exception(exc_message)
def save(self, files, path=None, clean_first=False):
""" helper metod, will store files in the current folder
param files: dict{filename: filecontents}
"""
path = path or self.current_folder
if clean_first:
shutil.rmtree(self.current_folder, ignore_errors=True)
files = {f: str(content) for f, content in files.items()}
save_files(path, files)
if not files:
mkdir(self.current_folder)
def copy_assets(self, origin_folder, assets=None):
copy_assets(origin_folder, self.current_folder, assets)
# Higher level operations
def remove_all(self):
self.run("remove '*' -f")
def export(self, ref, conanfile=GenConanfile(), args=None):
""" export a ConanFile with as "ref" and return the reference with recipe revision
"""
if conanfile:
self.save({"conanfile.py": conanfile})
self.run("export . {} {}".format(ref.full_str(), args or ""))
rrev = self.cache.package_layout(ref).recipe_revision()
return ref.copy_with_rev(rrev)
def init_git_repo(self, files=None, branch=None, submodules=None, folder=None, origin_url=None):
if folder is not None:
folder = os.path.join(self.current_folder, folder)
else:
folder = self.current_folder
_, commit = create_local_git_repo(files, branch, submodules, folder=folder,
origin_url=origin_url)
return commit
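# Minimal usage sketch (recipe name and reference are only examples): a TestClient
# writes files into an isolated folder and runs conan commands against a temporary
# cache and, here, one fake in-process server.
def _example_test_client():  # pragma: no cover - illustrative only
    client = TestClient(default_server_user=True)
    client.save({"conanfile.py": GenConanfile("hello", "0.1")})
    client.run("create . user/testing")
    client.run("upload hello/0.1@user/testing -c --all -r default")
    return client.out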
class TurboTestClient(TestClient):
tmp_json_name = ".tmp_json"
def __init__(self, *args, **kwargs):
if "users" not in kwargs and "default_server_user" not in kwargs:
from collections import defaultdict
kwargs["users"] = defaultdict(lambda: [("conan", "password")])
super(TurboTestClient, self).__init__(*args, **kwargs)
def create(self, ref, conanfile=GenConanfile(), args=None, assert_error=False):
if conanfile:
self.save({"conanfile.py": conanfile})
full_str = "{}@".format(ref.full_str()) if not ref.user else ref.full_str()
self.run("create . {} {} --json {}".format(full_str,
args or "", self.tmp_json_name),
assert_error=assert_error)
rrev = self.cache.package_layout(ref).recipe_revision()
data = json.loads(self.load(self.tmp_json_name))
if assert_error:
return None
package_id = data["installed"][0]["packages"][0]["id"]
package_ref = PackageReference(ref, package_id)
prev = self.cache.package_layout(ref.copy_clear_rev()).package_revision(package_ref)
return package_ref.copy_with_revs(rrev, prev)
def upload_all(self, ref, remote=None, args=None, assert_error=False):
remote = remote or list(self.servers.keys())[0]
self.run("upload {} -c --all -r {} {}".format(ref.full_str(), remote, args or ""),
assert_error=assert_error)
if not assert_error:
remote_rrev, _ = self.servers[remote].server_store.get_last_revision(ref)
return ref.copy_with_rev(remote_rrev)
return
def export_pkg(self, ref, conanfile=GenConanfile(), args=None, assert_error=False):
if conanfile:
self.save({"conanfile.py": conanfile})
self.run("export-pkg . {} {} --json {}".format(ref.full_str(),
args or "", self.tmp_json_name),
assert_error=assert_error)
rrev = self.cache.package_layout(ref).recipe_revision()
data = json.loads(self.load(self.tmp_json_name))
if assert_error:
return None
package_id = data["installed"][0]["packages"][0]["id"]
package_ref = PackageReference(ref, package_id)
prev = self.cache.package_layout(ref.copy_clear_rev()).package_revision(package_ref)
return package_ref.copy_with_revs(rrev, prev)
def recipe_exists(self, ref):
return self.cache.package_layout(ref).recipe_exists()
def package_exists(self, pref):
return self.cache.package_layout(pref.ref).package_exists(pref)
def recipe_revision(self, ref):
return self.cache.package_layout(ref).recipe_revision()
def package_revision(self, pref):
return self.cache.package_layout(pref.ref).package_revision(pref)
def search(self, pattern, remote=None, assert_error=False, args=None):
remote = " -r={}".format(remote) if remote else ""
self.run("search {} --json {} {} {}".format(pattern, self.tmp_json_name, remote,
args or ""),
assert_error=assert_error)
data = json.loads(self.load(self.tmp_json_name))
return data
def massive_uploader(self, ref, revisions, num_prev, remote=None):
"""Uploads N revisions with M package revisions. The revisions can be specified like:
revisions = [{"os": "Windows"}, {"os": "Linux"}], \
[{"os": "Macos"}], \
[{"os": "Solaris"}, {"os": "FreeBSD"}]
IMPORTANT: Different settings keys will cause different recipe revisions
"""
remote = remote or "default"
ret = []
for i, settings_groups in enumerate(revisions):
tmp = []
for settings in settings_groups:
conanfile_gen = GenConanfile(). \
with_build_msg("REV{}".format(i)). \
with_package_file("file", env_var="MY_VAR")
for s in settings.keys():
conanfile_gen = conanfile_gen.with_setting(s)
for k in range(num_prev):
args = " ".join(["-s {}={}".format(key, value)
for key, value in settings.items()])
with environment_append({"MY_VAR": str(k)}):
pref = self.create(ref, conanfile=conanfile_gen, args=args)
self.upload_all(ref, remote=remote)
tmp.append(pref)
ret.append(tmp)
return ret
def init_svn_repo(self, subpath, files=None, repo_url=None):
if not repo_url:
repo_url = create_remote_svn_repo(temp_folder())
_, rev = create_local_svn_checkout(files, repo_url, folder=self.current_folder,
rel_project_path=subpath, delete_checkout=False)
return rev
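# Illustrative sketch (reference is an example; ConanFileReference may already be
# imported at the top of this module): TurboTestClient returns fully-revisioned
# references, so a create + upload round-trip can be asserted directly.
def _example_turbo_client():  # pragma: no cover - illustrative only
    from conans.model.ref import ConanFileReference
    client = TurboTestClient(default_server_user=True)
    ref = ConanFileReference.loads("pkg/0.1@user/testing")
    pref = client.create(ref)
    client.upload_all(ref)
    return pref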
class StoppableThreadBottle(threading.Thread):
"""
Real server to test download endpoints
"""
def __init__(self, host=None, port=None):
self.host = host or "127.0.0.1"
self.port = port or random.randrange(48000, 49151)
self.server = bottle.Bottle()
super(StoppableThreadBottle, self).__init__(target=self.server.run,
kwargs={"host": self.host, "port": self.port})
self.daemon = True
self._stop = threading.Event()
def stop(self):
self._stop.set()
def run_server(self):
self.start()
time.sleep(1)
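# Illustrative sketch: serving a static payload from a background bottle server
# (the "/hello" route and payload are only examples).
def _example_stoppable_bottle():  # pragma: no cover - illustrative only
    http_server = StoppableThreadBottle()
    @http_server.server.get("/hello")
    def _hello():
        return "world"
    http_server.run_server()
    return "http://{}:{}/hello".format(http_server.host, http_server.port)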
|
py | b40641d093deed7fb5d5752f621be1fd3360e83c | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
from typing import Any, Sequence
from libcst._nodes.module import Module
from libcst._nodes.whitespace import NEWLINE_RE
from libcst._parser.production_decorator import with_production
from libcst._parser.types.config import ParserConfig
@with_production("file_input", "(NEWLINE | stmt)* ENDMARKER")
def convert_file_input(config: ParserConfig, children: Sequence[Any]) -> Any:
*body, footer = children
if len(body) == 0:
# If there's no body, the header and footer are ambiguous. The header is more
# important, and should own the EmptyLine nodes instead of the footer.
header = footer
footer = ()
if (
len(config.lines) == 2
and NEWLINE_RE.fullmatch(config.lines[0])
and config.lines[1] == ""
):
# This is an empty file (not even a comment), so special-case this to an
# empty list instead of a single dummy EmptyLine (which is what we'd
# normally parse).
header = ()
else:
# Steal the leading lines from the first statement, and move them into the
# header.
first_stmt = body[0]
header = first_stmt.leading_lines
body[0] = first_stmt.with_changes(leading_lines=())
return Module(
header=header,
body=body,
footer=footer,
encoding=config.encoding,
default_indent=config.default_indent,
default_newline=config.default_newline,
has_trailing_newline=config.has_trailing_newline,
)
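# Illustrative note (an assumed example input, not used by the parser): for a source
# such as "\n# comment\npass\n", the leading blank/comment lines end up in
# Module.header, the statements in Module.body, and trailing lines in Module.footer,
# following the header/footer handling above.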
|
py | b4064248f87cb70af0b495aea2bd6ac28f3e5006 | from ..flair_loader.skin_loader_withcheck import Skin_Loader
from ..flair_management.skin_manager.skin_manager import Skin_Manager
from ..flair_management.loadout_manager.loadouts_manager import Loadouts_Manager
class Completer:
cli = None
@staticmethod
def generate_completer_dict():
'''Generate command autocomplete data'''
data = {
"randomize": {
"skins": None,
"buddies": None
},
"modify": None,
"reset": None,
"loadout": {},
"config": None,
"set": {},
"reload": None,
"exit": None,
}
# generate autocomplete for "set" command
# im actually so happy this worked; made validation for setting skins SO easy to implement
def build_set_autocomplete():
skin_data = Skin_Manager.fetch_skin_data()
weapons = {}
for uuid,weapon in skin_data.items():
weapons[weapon['display_name']] = {
skin['display_name'].replace(" ","-"): {
level['display_name'].replace(" ","-"): {
chroma['display_name'].replace(" ","-"): {} for _,chroma in skin['chromas'].items()
} for _,level in skin['levels'].items()
} for _,skin in weapon['skins'].items()
}
data['set'] = weapons
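            # Resulting shape (illustrative names, spaces replaced by dashes):
            # {"Vandal": {"Prime-Vandal": {"Level-1": {"Default": {}}}}}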
def build_loadout_autocomplete():
data["loadout"] = {
"create": None,
"equip": {},
"preview": {},
"delete": {},
}
loadouts = Loadouts_Manager.fetch_all_loadouts()
if loadouts is not None:
for loadout in loadouts:
data["loadout"]["equip"][loadout["name"].replace(" ", "-")] = None
data["loadout"]["preview"][loadout["name"].replace(" ", "-")] = None
data["loadout"]["delete"][loadout["name"].replace(" ", "-")] = None
build_set_autocomplete()
build_loadout_autocomplete()
Completer.cli.commands = data
return data |
py | b4064299ca2aa0df22ce86b9bbf19bdcb5d1536b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author : yanyongyu
@Date : 2020-09-18 00:00:13
@LastEditors : yanyongyu
@LastEditTime : 2022-01-13 21:01:33
@Description : None
@GitHub : https://github.com/yanyongyu
"""
__author__ = "yanyongyu"
from nonebot.matcher import Matcher
from nonebot.permission import SUPERUSER
from nonebot import on_notice, get_driver, on_command, on_message
from .config import Config
from .data_source import cpu_status, disk_usage, memory_status, per_cpu_status
global_config = get_driver().config
status_config = Config(**global_config.dict())
command = on_command(
"状态",
permission=(status_config.server_status_only_superusers or None) and SUPERUSER,
priority=10,
)
@command.handle()
async def server_status(matcher: Matcher):
data = []
if status_config.server_status_cpu:
if status_config.server_status_per_cpu:
data.append("CPU:")
for index, per_cpu in enumerate(per_cpu_status()):
data.append(f" core{index + 1}: {int(per_cpu):02d}%")
else:
data.append(f"CPU: {int(cpu_status()):02d}%")
if status_config.server_status_memory:
data.append(f"Memory: {int(memory_status()):02d}%")
if status_config.server_status_disk:
data.append("Disk:")
for k, v in disk_usage().items():
data.append(f" {k}: {int(v.percent):02d}%")
await matcher.send(message="\n".join(data))
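# Illustrative (assumed) .env configuration using the option names referenced above;
# the actual fields and defaults are defined in .config.Config:
#
#   SERVER_STATUS_ONLY_SUPERUSERS=true
#   SERVER_STATUS_CPU=true
#   SERVER_STATUS_PER_CPU=false
#   SERVER_STATUS_MEMORY=true
#   SERVER_STATUS_DISK=true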
try:
from nonebot.adapters.onebot.v11 import PokeNotifyEvent, PrivateMessageEvent
except ImportError:
pass
else:
async def _group_poke(event: PokeNotifyEvent) -> bool:
return event.is_tome() and (
not status_config.server_status_only_superusers
or str(event.user_id) in global_config.superusers
)
group_poke = on_notice(_group_poke, priority=10, block=True)
group_poke.handle()(server_status)
async def _poke(event: PrivateMessageEvent) -> bool:
return event.sub_type == "friend" and event.message[0].type == "poke"
poke = on_message(
_poke,
permission=(status_config.server_status_only_superusers or None) and SUPERUSER,
priority=10,
)
poke.handle()(server_status)
|
py | b40642acedbda71abaa041a5d7d6dcbb50a6f4ef | from osrsmath.apps.GUI.optimize.optimize_skeleton import Ui_Form
from osrsmath.apps.GUI.shared.widgets import Savable
from PySide2 import QtCore, QtGui, QtWidgets
from osrsmath.combat.spells import STANDARD, ANCIENT
from osrsmath.combat.equipment import EquipmentPoolFiltered
import osrsmath.combat.boosts as boosts
import inspect
import webbrowser
import os
from urllib.parse import quote
from pathlib import Path
from pprint import pprint
class Data:
pass
def disable(obj):
# obj.setEnabled(False)
obj.hide()
def enable(obj):
obj.show()
# if not obj.isEnabled():
# obj.setEnabled(True)
class OptimizePanel(QtWidgets.QWidget, Ui_Form, Savable):
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self.data = Data()
self.data.monsters = {}
self.special_sets = [
'dharok',
'slayer_helm',
'obsidian',
'void_knight',
'elite_void',
'berserker_necklace',
'salve_amulet',
'thammaron',
'viggoras',
'DHL',
'DHCB',
'crawsbow',
]
self.entities = {
'monsters': Savable.Entity(
None, None,
lambda o, v: {self.add_monster(name, monster) for name, monster in v.items()},
lambda v: self.data.monsters
),
'cpu_cores': Savable.LineEdit(self.cpu_cores, 0),
'training_skill': Savable.DropDown(self.training_skill, None),
'potions': Savable.DropDown(self.potions, None),
'potion_attributes': Savable.DropDown(self.potion_attributes, None),
'boosting_scheme': Savable.DropDown(self.boosting_scheme, None),
'below_skill': Savable.DropDown(self.below_skill, None),
'redose_level': Savable.LineEdit(self.redose_level, None),
'prayers': Savable.DropDown(self.prayers, None),
'prayer_attributes': Savable.DropDown(self.prayer_attributes, None),
'spell': Savable.DropDown(self.spell, None),
'function': Savable.DropDown(self.function, None),
'start_end': Savable.LineEdit(self.start_end, 'asd1,1,1-99,99,99'),
**{s: Savable.CheckBox(getattr(self, s), True) for s in self.special_sets if s != 'dharok'},
'dharok': Savable.LineEdit(self.dharok, 1),
'show_histogram': Savable.CheckBox(self.show_histogram, False),
}
for slot in ['head', 'cape', 'neck', 'ammo', 'weapon', 'body', 'legs', 'hands', 'feet', 'ring']:
getattr(self, f"{slot}_link").setToolTip('Open the wiki page for the equipment in this slot.')
equipment_button = getattr(self, f"{slot}_link")
equipment_button.clicked.connect(
lambda _=None, slot=slot: self.open_link(slot)
)
def get_members(cls):
members = inspect.getmembers(cls, predicate=inspect.isfunction)
members.sort(key=lambda m: inspect.getsourcelines(m[1])[1])
names, functions = list(zip(*members))
return names
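        # Note: sorting by inspect.getsourcelines keeps the drop-down entries in the
        # order the potion/prayer methods are declared in their classes, instead of
        # the alphabetical order inspect.getmembers returns.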
self.potions.addItems(get_members(boosts.Potions))
self.prayers.addItems(get_members(boosts.Prayers))
self.function.currentIndexChanged.connect(self.on_function_select)
self.training_skill.currentIndexChanged.connect(self.on_training_skill_select)
self.potions.currentIndexChanged.connect(self.on_potion_select)
self.boosting_scheme.currentIndexChanged.connect(self.on_boost_scheme_select)
self.prayers.currentIndexChanged.connect(self.on_prayer_select)
self.redose_level.setValidator(QtGui.QIntValidator(0, 99))
        self.cpu_cores.setValidator(QtGui.QIntValidator(0, os.cpu_count())) # Apparently, it only validates the # of digits
self.cpu_cores.setToolTip(f'0 will use all cores. You have {os.cpu_count()}.')
self.on_function_select()
self.on_training_skill_select()
self.on_boost_scheme_select()
self.on_potion_select()
self.on_prayer_select()
# Allow the delete key to remove the selected opponent
shortcut = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Delete), self.opponents);
shortcut.activated.connect(self.remove_selected_monster)
shortcut = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Backspace), self.opponents); # For mac
shortcut.activated.connect(self.remove_selected_monster)
# Populate spells
spells = list(STANDARD.keys()) + list(ANCIENT.keys())
completer = QtWidgets.QCompleter(spells)
self.spell.clear()
self.spell.addItems(spells)
self.spell.setCompleter(completer)
self.spell.completer().setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
def get_selected_sets(self):
return [s for s in self.special_sets if (
(s == 'dharok' and int(self.entities[s].get())) != 0 or
(s != 'dharok' and self.entities[s].get())
)]
def open_link(self, slot):
pool = EquipmentPoolFiltered()
item = getattr(self, slot).currentText()
try:
equipment = pool.by_name(item)
except ValueError as e:
QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Warning, 'Wiki Link not Found', str(e)
).exec_()
return
# Encode ending (item name) to "%xx escape" format.
p = Path(equipment['wiki_url'])
url = p.parent/quote(p.name)
webbrowser.open(str(url))
def on_function_select(self):
if self.function.currentText() == 'Train':
self.stackedWidget.setCurrentIndex(0)
else:
self.stackedWidget.setCurrentIndex(1)
def on_training_skill_select(self):
if self.training_skill.currentText() == 'magic':
enable(self.spell)
enable(self.spell_label)
else:
disable(self.spell)
disable(self.spell_label)
def on_prayer_select(self):
if self.prayers.currentText() == 'none':
disable(self.prayer_attributes)
else:
enable(self.prayer_attributes)
def on_potion_select(self):
if self.potions.currentText() == 'none':
disable(self.potion_attributes)
disable(self.boosting_scheme)
else:
enable(self.potion_attributes)
enable(self.boosting_scheme)
self.on_boost_scheme_select()
def on_boost_scheme_select(self):
if self.boosting_scheme.currentText() == 'Constant' or self.potions.currentText() == 'none':
disable(self.below_skill)
disable(self.redose_level)
disable(self.label_5)
else:
enable(self.below_skill)
enable(self.redose_level)
enable(self.label_5)
def get_training_skill(self):
return self.training_skill.currentText().lower()
def remove_selected_monster(self):
name = self.opponents.takeItem(self.opponents.currentRow()).text()
del self.data.monsters[name]
def add_monster(self, name, monster):
self.data.monsters[name] = monster
if name not in (self.opponents.item(i).text() for i in range(self.opponents.count())):
self.opponents.addItem(name) |
py | b40642eeff3d580c8cd805702e59d74a96b1f14c | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Instrument `celery`_ to trace Celery applications.
.. _celery: https://pypi.org/project/celery/
Usage
-----
* Start broker backend
.. code::
docker run -p 5672:5672 rabbitmq
* Run instrumented task
.. code:: python
from opentelemetry.instrumentation.celery import CeleryInstrumentor
from celery import Celery
from celery.signals import worker_process_init
@worker_process_init.connect(weak=False)
def init_celery_tracing(*args, **kwargs):
CeleryInstrumentor().instrument()
app = Celery("tasks", broker="amqp://localhost")
@app.task
def add(x, y):
return x + y
add.delay(42, 50)
API
---
"""
import logging
from typing import Collection, Iterable
from celery import signals # pylint: disable=no-name-in-module
from opentelemetry import trace
from opentelemetry.instrumentation.celery import utils
from opentelemetry.instrumentation.celery.package import _instruments
from opentelemetry.instrumentation.celery.version import __version__
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.propagate import extract, inject
from opentelemetry.propagators.textmap import Getter
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.trace.status import Status, StatusCode
logger = logging.getLogger(__name__)
# Task operations
_TASK_TAG_KEY = "celery.action"
_TASK_APPLY_ASYNC = "apply_async"
_TASK_RUN = "run"
_TASK_RETRY_REASON_KEY = "celery.retry.reason"
_TASK_REVOKED_REASON_KEY = "celery.revoked.reason"
_TASK_REVOKED_TERMINATED_SIGNAL_KEY = "celery.terminated.signal"
_TASK_NAME_KEY = "celery.task_name"
class CeleryGetter(Getter):
def get(self, carrier, key):
value = getattr(carrier, key, None)
if value is None:
return None
if isinstance(value, str) or not isinstance(value, Iterable):
value = (value,)
return value
def keys(self, carrier):
return []
celery_getter = CeleryGetter()
class CeleryInstrumentor(BaseInstrumentor):
def instrumentation_dependencies(self) -> Collection[str]:
return _instruments
def _instrument(self, **kwargs):
tracer_provider = kwargs.get("tracer_provider")
# pylint: disable=attribute-defined-outside-init
self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)
signals.task_prerun.connect(self._trace_prerun, weak=False)
signals.task_postrun.connect(self._trace_postrun, weak=False)
signals.before_task_publish.connect(
self._trace_before_publish, weak=False
)
signals.after_task_publish.connect(
self._trace_after_publish, weak=False
)
signals.task_failure.connect(self._trace_failure, weak=False)
signals.task_retry.connect(self._trace_retry, weak=False)
def _uninstrument(self, **kwargs):
signals.task_prerun.disconnect(self._trace_prerun)
signals.task_postrun.disconnect(self._trace_postrun)
signals.before_task_publish.disconnect(self._trace_before_publish)
signals.after_task_publish.disconnect(self._trace_after_publish)
signals.task_failure.disconnect(self._trace_failure)
signals.task_retry.disconnect(self._trace_retry)
def _trace_prerun(self, *args, **kwargs):
task = utils.retrieve_task(kwargs)
task_id = utils.retrieve_task_id(kwargs)
if task is None or task_id is None:
return
request = task.request
tracectx = extract(request, getter=celery_getter) or None
logger.debug("prerun signal start task_id=%s", task_id)
operation_name = f"{_TASK_RUN}/{task.name}"
span = self._tracer.start_span(
operation_name, context=tracectx, kind=trace.SpanKind.CONSUMER
)
activation = trace.use_span(span, end_on_exit=True)
activation.__enter__() # pylint: disable=E1101
utils.attach_span(task, task_id, (span, activation))
@staticmethod
def _trace_postrun(*args, **kwargs):
task = utils.retrieve_task(kwargs)
task_id = utils.retrieve_task_id(kwargs)
if task is None or task_id is None:
return
logger.debug("postrun signal task_id=%s", task_id)
# retrieve and finish the Span
span, activation = utils.retrieve_span(task, task_id)
if span is None:
logger.warning("no existing span found for task_id=%s", task_id)
return
# request context tags
if span.is_recording():
span.set_attribute(_TASK_TAG_KEY, _TASK_RUN)
utils.set_attributes_from_context(span, kwargs)
utils.set_attributes_from_context(span, task.request)
span.set_attribute(_TASK_NAME_KEY, task.name)
activation.__exit__(None, None, None)
utils.detach_span(task, task_id)
def _trace_before_publish(self, *args, **kwargs):
task = utils.retrieve_task_from_sender(kwargs)
task_id = utils.retrieve_task_id_from_message(kwargs)
if task is None or task_id is None:
return
operation_name = f"{_TASK_APPLY_ASYNC}/{task.name}"
span = self._tracer.start_span(
operation_name, kind=trace.SpanKind.PRODUCER
)
# apply some attributes here because most of the data is not available
if span.is_recording():
span.set_attribute(_TASK_TAG_KEY, _TASK_APPLY_ASYNC)
span.set_attribute(SpanAttributes.MESSAGING_MESSAGE_ID, task_id)
span.set_attribute(_TASK_NAME_KEY, task.name)
utils.set_attributes_from_context(span, kwargs)
activation = trace.use_span(span, end_on_exit=True)
activation.__enter__() # pylint: disable=E1101
utils.attach_span(task, task_id, (span, activation), is_publish=True)
headers = kwargs.get("headers")
if headers:
inject(headers)
@staticmethod
def _trace_after_publish(*args, **kwargs):
task = utils.retrieve_task_from_sender(kwargs)
task_id = utils.retrieve_task_id_from_message(kwargs)
if task is None or task_id is None:
return
# retrieve and finish the Span
_, activation = utils.retrieve_span(task, task_id, is_publish=True)
if activation is None:
logger.warning("no existing span found for task_id=%s", task_id)
return
activation.__exit__(None, None, None) # pylint: disable=E1101
utils.detach_span(task, task_id, is_publish=True)
@staticmethod
def _trace_failure(*args, **kwargs):
task = utils.retrieve_task_from_sender(kwargs)
task_id = utils.retrieve_task_id(kwargs)
if task is None or task_id is None:
return
# retrieve and pass exception info to activation
span, _ = utils.retrieve_span(task, task_id)
if span is None or not span.is_recording():
return
status_kwargs = {"status_code": StatusCode.ERROR}
ex = kwargs.get("einfo")
if (
hasattr(task, "throws")
and ex is not None
and isinstance(ex.exception, task.throws)
):
return
if ex is not None:
status_kwargs["description"] = str(ex)
span.set_status(Status(**status_kwargs))
@staticmethod
def _trace_retry(*args, **kwargs):
task = utils.retrieve_task_from_sender(kwargs)
task_id = utils.retrieve_task_id_from_request(kwargs)
reason = utils.retrieve_reason(kwargs)
if task is None or task_id is None or reason is None:
return
span, _ = utils.retrieve_span(task, task_id)
if span is None or not span.is_recording():
return
# Add retry reason metadata to span
# Use `str(reason)` instead of `reason.message` in case we get
# something that isn't an `Exception`
span.set_attribute(_TASK_RETRY_REASON_KEY, str(reason))
|
py | b40643cb773aab949ed6caa66e97a846596db78f | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
from .backward_function import BackwardFunction
class SwishBackward(BackwardFunction):
@property
def name(self):
return 'SwishBackward'
def _create_forward_inputs_and_outputs(self, inputs, outputs):
# Inputs on the forward graph
inputs_fwd = []
for i in range(self._num_inputs_fwd):
need_grad = self.forward_func.inputs[i].need_grad
v = nn.Variable(inputs[i].shape, need_grad=need_grad)
v.data = inputs[i].data
v.grad = outputs[i].data
inputs_fwd += [v]
# Outputs on the forward graph
outputs_fwd = []
inp0 = inputs[self._num_inputs_fwd] # y
inp1 = inputs[self._num_inputs_fwd + 1] # dy
v = nn.Variable(inp1.shape)
v.data = inp0.data
v.grad = inp1.data
outputs_fwd += [v]
return inputs_fwd, outputs_fwd
def backward_impl(self, inputs, outputs, prop_down, accum):
# inputs: [inputs_fwd_graph] + [inputs_bwd_graph] or
# [inputs_fwd_graph] + [outputs_fwd_graph] + [inputs_bwd_graph]
# Inputs
x0 = inputs[0].data
y0 = inputs[1].data
dy = inputs[2].data
# Outputs
dx0 = outputs[0].data
# Grads of inputs
g_x0 = inputs[0].grad
g_y0 = inputs[1].grad
g_dy = inputs[2].grad
# Grads of outputs
g_dx0 = outputs[0].grad
if prop_down[0]:
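            # Derivation note: with s = sigmoid(x0) and y0 = x0 * s (the forward output),
            # dy/dx = s + x0 * s * (1 - s) = y0 + s * (1 - y0), which is `y_dev` below.
            # Differentiating y_dev once more gives
            # d(y_dev)/dx = y_dev + s * (1 - s) * (1 - y0) - s * y_dev,
            # which is the factor multiplying g_dx0 * dy in g_x0_.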
s = F.sigmoid(x0)
#y_dev = dx0 / dy
y_dev = y0 + s * (1.0 - y0)
g_x0_ = g_dx0 * dy * (y_dev + s * (1.0 - s)
* (1.0 - y0) - s * y_dev)
if accum[0]:
g_x0 += g_x0_
else:
g_x0.copy_from(g_x0_)
if prop_down[2]:
inp = nn.Variable(x0.shape).apply(
data=x0, grad=g_dy, need_grad=True)
out = nn.Variable(dy.shape).apply(data=y0, grad=g_dx0)
self.forward_func.backward([inp], [out], accum=[accum[2]])
|
py | b40644da9fc6164700ff61ebf66a9a9ea5752e1c | import os
import sys
import logging
import argparse
import textwrap
import re
import asyncio
import unittest
import shutil
from vcd import VCDWriter
from datetime import datetime
from fx2 import VID_CYPRESS, PID_FX2, FX2Config
from fx2.format import input_data, diff_data
from .device import GlasgowDeviceError
from .device.config import GlasgowConfig
from .target.hardware import GlasgowHardwareTarget
from .gateware.analyzer import TraceDecoder
from .device.hardware import VID_QIHW, PID_GLASGOW, GlasgowHardwareDevice
from .internal_test import *
from .access.direct import *
from .applet import *
from .pyrepl import *
logger = logging.getLogger(__name__)
class TextHelpFormatter(argparse.HelpFormatter):
def __init__(self, prog):
super().__init__(prog, width=120)
def _fill_text(self, text, width, indent):
def filler(match):
text = match[0]
list_match = re.match(r"(\s*)\*", text)
if list_match:
return text
text = textwrap.fill(text, width,
initial_indent=indent,
subsequent_indent=indent)
text = re.sub(r"(\w-) (\w)", r"\1\2", text)
text = text + (match[2] or "")
return text
text = textwrap.dedent(text).strip()
return re.sub(r"((?!\n\n)(?!\n\s+(?:\*|\d+\.)).)+(\n*)?", filler, text, flags=re.S)
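# Note on the formatter above: plain paragraphs are re-wrapped to the 120-column width
# set in __init__, "*" bullet items are left untouched, and numbered items start a new
# block, so applet descriptions can mix prose with lists.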
def create_argparser():
parser = argparse.ArgumentParser(formatter_class=TextHelpFormatter)
parser.add_argument(
"-v", "--verbose", default=0, action="count",
help="increase logging verbosity")
parser.add_argument(
"-q", "--quiet", default=0, action="count",
help="decrease logging verbosity")
parser.add_argument(
"-F", "--filter-log", metavar="FILTER", type=str, action="append",
help="enable maximum verbosity for log messages starting with 'FILTER: '")
return parser
def get_argparser():
def add_subparsers(parser, **kwargs):
if isinstance(parser, argparse._MutuallyExclusiveGroup):
container = parser._container
if kwargs.get('prog') is None:
formatter = container._get_formatter()
formatter.add_usage(container.usage, [], [], '')
kwargs['prog'] = formatter.format_help().strip()
parsers_class = parser._pop_action_class(kwargs, 'parsers')
subparsers = argparse._SubParsersAction(option_strings=[],
parser_class=type(container),
**kwargs)
parser._add_action(subparsers)
else:
subparsers = parser.add_subparsers(dest="applet", metavar="APPLET")
return subparsers
def add_applet_arg(parser, mode, required=False):
subparsers = add_subparsers(parser, dest="applet", metavar="APPLET")
subparsers.required = required
for applet_name, applet in GlasgowApplet.all_applets.items():
if mode == "test" and not hasattr(applet, "test_cls"):
continue
if mode == "tool" and not hasattr(applet, "tool_cls"):
continue
if mode == "tool":
help = applet.tool_cls.help
description = applet.tool_cls.description
else:
help = applet.help
description = applet.description
if applet.preview:
help += " (PREVIEW QUALITY APPLET)"
description = " This applet is PREVIEW QUALITY and may CORRUPT DATA or " \
"have missing features. Use at your own risk.\n" + description
p_applet = subparsers.add_parser(
applet_name, help=help, description=description,
formatter_class=TextHelpFormatter)
if mode == "test":
p_applet.add_argument(
"tests", metavar="TEST", nargs="*",
help="test cases to run")
if mode in ("build", "run"):
access_args = DirectArguments(applet_name=applet_name,
default_port="AB",
pin_count=16)
if mode == "run":
g_applet_build = p_applet.add_argument_group("build arguments")
applet.add_build_arguments(g_applet_build, access_args)
g_applet_run = p_applet.add_argument_group("run arguments")
applet.add_run_arguments(g_applet_run, access_args)
                    # FIXME: this makes it impossible to add subparsers in applets
# g_applet_interact = p_applet.add_argument_group("interact arguments")
# applet.add_interact_arguments(g_applet_interact)
applet.add_interact_arguments(p_applet)
if mode == "build":
applet.add_build_arguments(p_applet, access_args)
if mode == "tool":
applet.tool_cls.add_arguments(p_applet)
parser = create_argparser()
subparsers = parser.add_subparsers(dest="action", metavar="COMMAND")
subparsers.required = True
def add_ports_arg(parser):
parser.add_argument(
"ports", metavar="PORTS", type=str, nargs="?", default="AB",
help="I/O port set (one or more of: A B, default: all)")
def add_voltage_arg(parser, help):
parser.add_argument(
"voltage", metavar="VOLTS", type=float, nargs="?", default=None,
help="%s (range: 1.8-5.0)".format(help))
p_voltage = subparsers.add_parser(
"voltage", formatter_class=TextHelpFormatter,
help="query or set I/O port voltage")
add_ports_arg(p_voltage)
add_voltage_arg(p_voltage,
help="I/O port voltage")
p_voltage.add_argument(
"--tolerance", metavar="PCT", type=float, default=10.0,
help="raise alert if measured voltage deviates by more than ±PCT%% (default: %(default)s)")
p_voltage.add_argument(
"--no-alert", dest="set_alert", default=True, action="store_false",
help="do not raise an alert if Vsense is out of range of Vio")
p_voltage_limit = subparsers.add_parser(
"voltage-limit", formatter_class=TextHelpFormatter,
help="limit I/O port voltage as a safety precaution")
add_ports_arg(p_voltage_limit)
add_voltage_arg(p_voltage_limit,
help="maximum allowed I/O port voltage")
p_run = subparsers.add_parser(
"run", formatter_class=TextHelpFormatter,
help="load an applet bitstream and run applet code")
p_run.add_argument(
"--rebuild", default=False, action="store_true",
help="rebuild bitstream even if an identical one is already loaded")
p_run.add_argument(
"--trace", metavar="FILENAME", type=argparse.FileType("wt"), default=None,
help="trace applet I/O to FILENAME")
g_run_bitstream = p_run.add_mutually_exclusive_group(required=True)
g_run_bitstream.add_argument(
"--bitstream", metavar="FILENAME", type=argparse.FileType("rb"),
help="read bitstream from the specified file")
add_applet_arg(g_run_bitstream, mode="run")
p_tool = subparsers.add_parser(
"tool", formatter_class=TextHelpFormatter,
help="run an offline tool provided with an applet")
add_applet_arg(p_tool, mode="tool")
p_flash = subparsers.add_parser(
"flash", formatter_class=TextHelpFormatter,
help="program FX2 firmware or applet bitstream into EEPROM")
g_flash_firmware = p_flash.add_mutually_exclusive_group()
g_flash_firmware.add_argument(
"--firmware", metavar="FILENAME", type=argparse.FileType("rb"),
help="read firmware from the specified file")
g_flash_firmware.add_argument(
"--remove-firmware", default=False, action="store_true",
help="remove any firmware present")
g_flash_bitstream = p_flash.add_mutually_exclusive_group()
g_flash_bitstream.add_argument(
"--bitstream", metavar="FILENAME", type=argparse.FileType("rb"),
help="read bitstream from the specified file")
g_flash_bitstream.add_argument(
"--remove-bitstream", default=False, action="store_true",
help="remove any bitstream present")
add_applet_arg(g_flash_bitstream, mode="build")
def revision(arg):
if re.match(r"^[A-Z]$", arg):
return arg
else:
raise argparse.ArgumentTypeError("{} is not a valid revision letter".format(arg))
def serial(arg):
if re.match(r"^\d{8}T\d{6}Z$", arg):
return arg
else:
raise argparse.ArgumentTypeError("{} is not a valid serial number".format(arg))
p_build = subparsers.add_parser(
"build", formatter_class=TextHelpFormatter,
help="(advanced) build applet logic and save it as a file")
p_build.add_argument(
"--trace", default=False, action="store_true",
help="include applet analyzer")
p_build.add_argument(
"-t", "--type", metavar="TYPE", type=str,
choices=["zip", "archive", "v", "verilog", "bin", "bitstream"], default="bitstream",
help="artifact to build (one of: archive verilog bitstream, default: %(default)s)")
p_build.add_argument(
"-f", "--filename", metavar="FILENAME", type=str,
help="file to save artifact to (default: <applet-name>.{v,bin})")
add_applet_arg(p_build, mode="build", required=True)
p_test = subparsers.add_parser(
"test", formatter_class=TextHelpFormatter,
help="(advanced) test applet logic without target hardware")
add_applet_arg(p_test, mode="test", required=True)
p_internal_test = subparsers.add_parser(
"internal-test", help="(advanced) verify device functionality")
internal_test_subparsers = p_internal_test.add_subparsers(dest="mode", metavar="MODE")
internal_test_subparsers.required = True
p_internal_test_toggle_io = internal_test_subparsers.add_parser(
"toggle-io", help="output 1 kHz square wave on all I/O pins at 3.3 V")
p_internal_test_mirror_i2c = internal_test_subparsers.add_parser(
"mirror-i2c", help="mirror {SDA,SCL} on A[0-1] at 3.3 V")
p_internal_test_shift_out = internal_test_subparsers.add_parser(
"shift-out", help="shift bytes from EP2OUT MSB first via {CLK,DO} on A[0-1] at 3.3 V")
p_internal_test_shift_out.add_argument(
"--async", dest="is_async", default=False, action="store_true",
help="use asynchronous FIFO")
p_internal_test_gen_seq = internal_test_subparsers.add_parser(
"gen-seq", help="read limit from EP4IN and generate sequence on {EP2OUT,EP6OUT}")
p_internal_test_pll = internal_test_subparsers.add_parser(
"pll", help="use PLL to output 15 MHz on SYNC port")
p_internal_test_registers = internal_test_subparsers.add_parser(
"registers", help="add I2C RW register [0] and RO register [1] = [0] << 1")
p_factory = subparsers.add_parser(
"factory", formatter_class=TextHelpFormatter,
help="(advanced) initial device programming")
p_factory.add_argument(
"--revision", metavar="REVISION", type=str,
default="B",
help="revision letter (if not specified: %(default)s)")
p_factory.add_argument(
"--serial", metavar="SERIAL", type=str,
default=datetime.now().strftime("%Y%m%dT%H%M%SZ"),
help="serial number in ISO 8601 format (if not specified: %(default)s)")
return parser
# The name of this function appears in Verilog output, so keep it tidy.
def _applet(args):
target = GlasgowHardwareTarget(multiplexer_cls=DirectMultiplexer,
with_analyzer=hasattr(args, "trace") and args.trace)
applet = GlasgowApplet.all_applets[args.applet]()
try:
applet.build(target, args)
except GlasgowAppletError as e:
applet.logger.error(e)
logger.error("failed to build subtarget for applet %r", args.applet)
raise SystemExit()
return target, applet
class ANSIColorFormatter(logging.Formatter):
LOG_COLORS = {
"TRACE" : "\033[37m",
"DEBUG" : "\033[36m",
"INFO" : "\033[1;37m",
"WARNING" : "\033[1;33m",
"ERROR" : "\033[1;31m",
"CRITICAL": "\033[1;41m",
}
def format(self, record):
color = self.LOG_COLORS.get(record.levelname, "")
return "{}{}\033[0m".format(color, super().format(record))
class SubjectFilter:
def __init__(self, level, subjects):
self.level = level
self.subjects = subjects
def filter(self, record):
levelno = record.levelno
for subject in self.subjects:
if record.msg.startswith(subject + ": "):
levelno = logging.DEBUG
return levelno >= self.level
def create_logger(args):
formatter_args = {"fmt": "{levelname[0]:s}: {name:s}: {message:s}", "style": "{"}
handler = logging.StreamHandler()
if sys.stderr.isatty() and sys.platform != 'win32':
handler.setFormatter(ANSIColorFormatter(**formatter_args))
else:
handler.setFormatter(logging.Formatter(**formatter_args))
root_logger = logging.getLogger()
root_logger.addHandler(handler)
level = logging.INFO + args.quiet * 10 - args.verbose * 10
if args.filter_log:
handler.addFilter(SubjectFilter(level, args.filter_log))
root_logger.setLevel(logging.TRACE)
else:
# By setting the log level on the root logger, we avoid creating LogRecords in the first
# place instead of filtering them later; we have a *lot* of logging, so this is much
# more efficient.
root_logger.setLevel(level)
async def _main():
args = get_argparser().parse_args()
create_logger(args)
try:
firmware_file = os.path.join(os.path.dirname(__file__), "glasgow.ihex")
if args.action in ("build", "test", "tool"):
pass
elif args.action == "factory":
device = GlasgowHardwareDevice(firmware_file, VID_CYPRESS, PID_FX2)
else:
device = GlasgowHardwareDevice(firmware_file)
if args.action == "voltage":
if args.voltage is not None:
await device.reset_alert(args.ports)
await device.poll_alert() # clear any remaining alerts
try:
await device.set_voltage(args.ports, args.voltage)
except:
await device.set_voltage(args.ports, 0.0)
raise
if args.set_alert and args.voltage != 0.0:
await asyncio.sleep(0.050) # let the output capacitor discharge a bit
await device.set_alert_tolerance(args.ports, args.voltage,
args.tolerance / 100)
print("Port\tVio\tVlimit\tVsense\tMonitor")
alerts = await device.poll_alert()
for port in args.ports:
vio = await device.get_voltage(port)
vlimit = await device.get_voltage_limit(port)
vsense = await device.measure_voltage(port)
alert = await device.get_alert(port)
notice = ""
if port in alerts:
notice += " (ALERT)"
print("{}\t{:.2}\t{:.2}\t{:.3}\t{:.2}-{:.2}\t{}"
.format(port, vio, vlimit, vsense, alert[0], alert[1], notice))
if args.action == "voltage-limit":
if args.voltage is not None:
await device.set_voltage_limit(args.ports, args.voltage)
print("Port\tVio\tVlimit")
for port in args.ports:
vio = await device.get_voltage(port)
vlimit = await device.get_voltage_limit(port)
print("{}\t{:.2}\t{:.2}"
.format(port, vio, vlimit))
if args.action == "run":
if args.applet:
target, applet = _applet(args)
device.demultiplexer = DirectDemultiplexer(device)
await device.download_target(target, rebuild=args.rebuild)
if args.trace:
logger.info("starting applet analyzer")
await device.write_register(target.analyzer.addr_done, 0)
analyzer_iface = await device.demultiplexer.claim_interface(
target.analyzer, target.analyzer.mux_interface, args=None)
trace_decoder = TraceDecoder(target.analyzer.event_sources)
vcd_writer = VCDWriter(args.trace, timescale="1 ns", check_values=False,
                        comment='Generated by Glasgow for bitstream ID %s' % target.get_bitstream_id().hex())
async def run_analyzer():
if not args.trace:
return
signals = {}
strobes = set()
for field_name, field_trigger, field_width in trace_decoder.events():
if field_trigger == "throttle":
var_type = "wire"
var_init = 0
elif field_trigger == "change":
var_type = "wire"
var_init = "x"
elif field_trigger == "strobe":
if field_width > 0:
var_type = "tri"
var_init = "z"
else:
var_type = "event"
var_init = ""
else:
assert False
signals[field_name] = vcd_writer.register_var(
scope="", name=field_name, var_type=var_type,
size=field_width, init=var_init)
if field_trigger == "strobe":
strobes.add(field_name)
init = True
while not trace_decoder.is_done():
trace_decoder.process(await analyzer_iface.read())
for cycle, events in trace_decoder.flush():
if events == "overrun":
target.analyzer.logger.error("FIFO overrun, shutting down")
for name in signals:
vcd_writer.change(signals[name], next_timestamp, "x")
timestamp += 1e3 # 1us
break
event_repr = " ".join("{}={}".format(n, v)
for n, v in events.items())
target.analyzer.logger.trace("cycle %d: %s", cycle, event_repr)
timestamp = 1e9 * (cycle + 0) // target.sys_clk_freq
next_timestamp = 1e9 * (cycle + 1) // target.sys_clk_freq
if init:
init = False
vcd_writer._timestamp = timestamp
for name, value in events.items():
vcd_writer.change(signals[name], timestamp, value)
for name, _value in events.items():
if name in strobes:
vcd_writer.change(signals[name], next_timestamp, "z")
vcd_writer.flush()
vcd_writer.close(timestamp)
async def run_applet():
logger.info("running handler for applet %r", args.applet)
try:
iface = await applet.run(device, args)
await applet.interact(device, args, iface)
except GlasgowAppletError as e:
applet.logger.error(str(e))
finally:
if args.trace:
await device.write_register(target.analyzer.addr_done, 1)
done, pending = await asyncio.wait([run_analyzer(), run_applet()],
return_when=asyncio.FIRST_EXCEPTION)
for task in done:
await task
# Work around bugs in python-libusb1 that cause segfaults on interpreter shutdown.
await device.demultiplexer.flush()
else:
with args.bitstream as f:
logger.info("downloading bitstream from %r", f.name)
await device.download_bitstream(f.read())
if args.action == "tool":
tool = GlasgowApplet.all_applets[args.applet].tool_cls()
await tool.run(args)
if args.action == "flash":
logger.info("reading device configuration")
header = await device.read_eeprom("fx2", 0, 8 + 4 + GlasgowConfig.size)
header[0] = 0xC2 # see below
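            # (In the Cypress FX2 boot-EEPROM format the first byte selects the boot
            #  mode: 0xC0 loads only VID/PID/DID, 0xC2 also loads firmware. The byte is
            #  forced to 0xC2 here, presumably so FX2Config.decode() still parses the
            #  configuration block on a C0-programmed device; the real value is chosen
            #  again when the image is re-encoded below.)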
fx2_config = FX2Config.decode(header, partial=True)
if (len(fx2_config.firmware) != 1 or
fx2_config.firmware[0][0] != 0x4000 - GlasgowConfig.size or
len(fx2_config.firmware[0][1]) != GlasgowConfig.size):
raise SystemExit("Unrecognized or corrupted configuration block")
glasgow_config = GlasgowConfig.decode(fx2_config.firmware[0][1])
logger.info("device has serial %s-%s",
glasgow_config.revision, glasgow_config.serial)
if fx2_config.disconnect:
logger.info("device has flashed firmware")
else:
logger.info("device does not have flashed firmware")
if glasgow_config.bitstream_size:
logger.info("device has flashed bitstream ID %s",
glasgow_config.bitstream_id.hex())
else:
logger.info("device does not have flashed bitstream")
new_bitstream = b""
if args.remove_bitstream:
logger.info("removing bitstream")
glasgow_config.bitstream_size = 0
glasgow_config.bitstream_id = b"\x00"*16
elif args.bitstream:
logger.info("using bitstream from %s", args.bitstream.name)
with args.bitstream as f:
new_bitstream = f.read()
glasgow_config.bitstream_size = len(new_bitstream)
glasgow_config.bitstream_id = b"\xff"*16
elif args.applet:
logger.info("building bitstream for applet %s", args.applet)
target, applet = _applet(args)
new_bitstream_id = target.get_bitstream_id()
new_bitstream = target.get_bitstream(debug=True)
# We always build and reflash the bitstream in case the one currently
# in EEPROM is corrupted. If we only compared the ID, there would be
# no easy way to recover from that case. There's also no point in
# storing the bitstream hash (as opposed to Verilog hash) in the ID,
# as building the bitstream takes much longer than flashing it.
logger.info("built bitstream ID %s", new_bitstream_id.hex())
glasgow_config.bitstream_size = len(new_bitstream)
glasgow_config.bitstream_id = new_bitstream_id
fx2_config.firmware[0] = (0x4000 - GlasgowConfig.size, glasgow_config.encode())
if args.remove_firmware:
logger.info("removing firmware")
fx2_config.disconnect = False
new_image = fx2_config.encode()
new_image[0] = 0xC0 # see below
else:
logger.info("using firmware from %r",
args.firmware.name if args.firmware else firmware_file)
with (args.firmware or open(firmware_file, "rb")) as f:
for (addr, chunk) in input_data(f, fmt="ihex"):
fx2_config.append(addr, chunk)
fx2_config.disconnect = True
new_image = fx2_config.encode()
if new_bitstream:
logger.info("programming bitstream")
old_bitstream = await device.read_eeprom("ice", 0, len(new_bitstream))
if old_bitstream != new_bitstream:
for (addr, chunk) in diff_data(old_bitstream, new_bitstream):
await device.write_eeprom("ice", addr, chunk)
logger.info("verifying bitstream")
if await device.read_eeprom("ice", 0, len(new_bitstream)) != new_bitstream:
logger.critical("bitstream programming failed")
return 1
else:
logger.info("bitstream identical")
logger.info("programming configuration and firmware")
old_image = await device.read_eeprom("fx2", 0, len(new_image))
if old_image != new_image:
for (addr, chunk) in diff_data(old_image, new_image):
await device.write_eeprom("fx2", addr, chunk)
logger.info("verifying configuration and firmware")
if await device.read_eeprom("fx2", 0, len(new_image)) != new_image:
logger.critical("configuration/firmware programming failed")
return 1
else:
logger.info("configuration and firmware identical")
if args.action == "build":
target, applet = _applet(args)
logger.info("building bitstream for applet %r", args.applet)
if args.type in ("v", "verilog"):
target.get_verilog().write(args.filename or args.applet + ".v")
if args.type in ("bin", "bitstream"):
with open(args.filename or args.applet + ".bin", "wb") as f:
f.write(target.get_bitstream(debug=True))
if args.type in ("zip", "archive"):
with target.get_build_tree() as tree:
if args.filename:
                        basename, _ = os.path.splitext(args.filename)
else:
basename = args.applet
shutil.make_archive(basename, format="zip", root_dir=tree, logger=logger)
if args.action == "test":
logger.info("testing applet %r", args.applet)
applet = GlasgowApplet.all_applets[args.applet]()
loader = unittest.TestLoader()
stream = unittest.runner._WritelnDecorator(sys.stderr)
result = unittest.TextTestResult(stream=stream, descriptions=True, verbosity=2)
result.failfast = True
def startTest(test):
unittest.TextTestResult.startTest(result, test)
result.stream.write("\n")
result.startTest = startTest
if args.tests == []:
suite = loader.loadTestsFromTestCase(applet.test_cls)
suite.run(result)
else:
for test in args.tests:
suite = loader.loadTestsFromName(test, module=applet.test_cls)
suite.run(result)
if not result.wasSuccessful():
for _, traceback in result.errors + result.failures:
print(traceback, end="", file=sys.stderr)
return 1
if args.action == "internal-test":
if args.mode == "toggle-io":
await device.download_bitstream(TestToggleIO().get_bitstream(debug=True))
await device.set_voltage("AB", 3.3)
if args.mode == "mirror-i2c":
await device.download_bitstream(TestMirrorI2C().get_bitstream(debug=True))
await device.set_voltage("A", 3.3)
if args.mode == "shift-out":
await device.download_bitstream(TestShiftOut(is_async=args.is_async)
.get_bitstream(debug=True))
await device.set_voltage("A", 3.3)
if args.mode == "gen-seq":
await device.download_bitstream(TestGenSeq().get_bitstream(debug=True))
if args.mode == "pll":
await device.download_bitstream(TestPLL().get_bitstream(debug=True))
if args.mode == "registers":
await device.download_bitstream(TestRegisters().get_bitstream(debug=True))
if args.action == "factory":
logger.info("reading device configuration")
header = await device.read_eeprom("fx2", 0, 8 + 4 + GlasgowConfig.size)
if not re.match(rb"^\xff+$", header):
logger.error("device already factory-programmed")
return 1
fx2_config = FX2Config(vendor_id=VID_QIHW, product_id=PID_GLASGOW,
device_id=1 + ord(args.revision) - ord('A'),
i2c_400khz=True)
glasgow_config = GlasgowConfig(args.revision, args.serial)
fx2_config.append(0x4000 - GlasgowConfig.size, glasgow_config.encode())
image = fx2_config.encode()
# Let FX2 hardware enumerate. This won't load the configuration block
# into memory automatically, but the firmware has code that does that
# if it detects a C0 load.
image[0] = 0xC0
logger.info("programming device configuration")
await device.write_eeprom("fx2", 0, image)
logger.info("verifying device configuration")
if await device.read_eeprom("fx2", 0, len(image)) != image:
logger.critical("factory programming failed")
return 1
except GlasgowDeviceError as e:
logger.error(e)
return 1
return 0
def main():
loop = asyncio.get_event_loop()
exit(loop.run_until_complete(_main()))
if __name__ == "__main__":
main()
|
py | b4064519be7d01970a494dc54630131a5742ae91 | #!/usr/bin/python3
# Even Odd program
num1 = 0
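# A negative input ends the loop (after it is classified once).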
while num1 >= 0:
num1 = int(input('Please Enter a Number: '))
    if num1 % 2 == 0:
print("EVEN number")
else:
print("ODD number")
|
py | b406468ef372d5c7e50eafcd701b25e0623ee59a | from classifiers import *
import math
import numpy as np
from scipy.spatial import KDTree
import utils
def kdtree_classify(classifier, entry, class_index=-1, k=1):
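    # k-nearest-neighbour vote: query the KD-tree for the k stored entries
    # closest to `entry` and return the category that occurs most often
    # among them.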
prepared_entry = utils.without_column(entry, class_index)
result = classifier.descriptor.query([prepared_entry], k=k)
scoreboard = {}
indexes = result[1]
if k > 1:
indexes = indexes[0]
for index in indexes:
category = classifier.categories[index]
if category not in scoreboard:
scoreboard[category] = 0
scoreboard[category] += 1
winner = (0, None)
for key, value in scoreboard.items():
if value > winner[0]:
winner = (value, key)
return winner[1]
def classify(self, entry, class_index=-1, k=1):
max_similarity = -float("inf")
best_categories = []
prepared_external_entry = utils.without_column(entry, class_index)
for i in range(len(self.descriptor)):
# prepared_internal_entry = utils.without_column(internal_entry, class_index)
internal_entry = self.descriptor[i]
class_value = self.categories[i]
similarity = -euclidian_dist(prepared_external_entry, internal_entry)
if similarity > max_similarity:
max_similarity = similarity
best_categories = [class_value]
elif similarity == max_similarity:
best_categories.append(class_value)
best_category = self.pick_one(best_categories)
return best_category
class Classifier:
def __init__(self):
self.hits = 0
self.fails = 0
self.descriptor = []
self.categories = []
def classify(self, entry, class_index=-1, k=1):
return self.on_classify(self, entry, class_index, k)
def add_random_entry(self, training_set):
self.descriptor.append(self.pick_one(training_set))
def pick_index(self, array):
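        # Note: round(uniform(0, n - 1)) picks the first and last index only half
        # as often as the middle ones; np.random.randint(0, n) would be uniform.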
return round(np.random.uniform(0, len(array) - 1))
def pick_one(self, array):
return array[self.pick_index(array)]
def remove_one(self, array):
index = self.pick_index(array)
value = array[index]
del array[index]
return value
class IBL1(Classifier):
def __init__(self, training_set, class_index=-1, params={}):
super(IBL1, self).__init__()
self.on_classify = kdtree_classify
if len(self.descriptor) == 0:
self.add_random_entry(training_set)
for external_entry in training_set:
max_similarity = -float("inf")
best_entries = []
for internal_entry in self.descriptor:
prepared_external_entry = utils.without_column(external_entry, class_index)
prepared_internal_entry = utils.without_column(internal_entry, class_index)
similarity = -euclidian_dist(prepared_external_entry, prepared_internal_entry)
if similarity > max_similarity:
max_similarity = similarity
best_entries = [internal_entry]
elif similarity == max_similarity:
best_entries.append(internal_entry)
best_entry = self.pick_one(best_entries)
if external_entry[class_index] == best_entry[class_index]:
self.hits += 1
else:
self.fails += 1
self.descriptor.append(external_entry)
for i in range(len(self.descriptor)):
self.categories.append(self.descriptor[i][class_index])
self.descriptor[i] = utils.without_column(self.descriptor[i], class_index)
self.descriptor = KDTree(np.array(self.descriptor))
class IBL2(Classifier):
def __init__(self, training_set, class_index=-1, params={}):
super(IBL2, self).__init__()
if len(self.descriptor) == 0:
self.add_random_entry(training_set)
self.on_classify = kdtree_classify
for external_entry in training_set:
max_similarity = -float("inf")
best_entries = []
for internal_entry in self.descriptor:
prepared_external_entry = utils.without_column(external_entry, class_index)
prepared_internal_entry = utils.without_column(internal_entry, class_index)
similarity = -euclidian_dist(prepared_external_entry, prepared_internal_entry)
if similarity > max_similarity:
max_similarity = similarity
best_entries = [internal_entry]
elif similarity == max_similarity:
best_entries.append(internal_entry)
best_entry = self.pick_one(best_entries)
if external_entry[class_index] == best_entry[class_index]:
self.hits += 1
else:
self.fails += 1
self.descriptor.append(external_entry)
for i in range(len(self.descriptor)):
self.categories.append(self.descriptor[i][class_index])
self.descriptor[i] = utils.without_column(self.descriptor[i], class_index)
self.descriptor = KDTree(np.array(self.descriptor))
class IBL3(Classifier):
class Register:
counter = 0
def __init__(self, entry, category):
self.id = self.counter
self.category = category
self.entry = entry
self.hits = 0
self.fails = 0
            # Increment the class-level counter so each register gets a unique id
            # (a plain `self.counter += 1` would only create an instance attribute,
            # leaving every id at 0 and breaking the similarity tables keyed by id).
            type(self).counter += 1
def __init__(self, training_set, class_index=-1, params={}):
super(IBL3, self).__init__()
self.on_classify = kdtree_classify
self.dropped = []
frequency_data = {}
processed_instances = 0
dropped_instances = 0
# Adds a random instance to the descriptor
if len(self.descriptor) == 0:
random_entry = self.remove_one(training_set)
(entry, class_value) = self.prepare(random_entry, class_index)
frequency_data[class_value] = 1
processed_instances += 1
register = self.Register(entry, class_value)
register.hits += 1
self.descriptor.append(register)
training_size = len(training_set)
for external_entry in training_set:
(entry, class_value) = self.prepare(external_entry, class_index)
# Searches for acceptable instances in the descriptor
best_acceptable = None
similarity_table = {}
for register in self.descriptor:
category = register.category
# Populates the similarity table
similarity = -euclidian_dist(entry, register.entry)
similarity_table[register.id] = similarity
# classifying acceptability factors
zf = params["zfa"]
zp = params["zpa"]
# Calculates the frequency interval (class)
p = frequency_data[category] / len(self.descriptor)
n = processed_instances
frequency_interval = self.interval(p, zf, n)
# Calculates the precision interval (instance)
n = register.hits + register.fails
p = register.hits / n
precision_interval = self.interval(p, zp, n)
if frequency_interval["sup"] < precision_interval["inf"]:
# Accept the instance
if not best_acceptable or best_acceptable[1] < similarity:
best_acceptable = (register, similarity)
if not best_acceptable and len(self.descriptor) > 0:
# No acceptable instances were found,
# so use a random register instead
random_register = self.pick_one(self.descriptor)
similarity = similarity_table[random_register.id]
best_acceptable = (random_register, similarity)
# Flag that indicates if we learned a new entry
learned = False
if best_acceptable and best_acceptable[0].category == class_value:
# Correct evaluation, simply update the hit counter
self.hits += 1
else:
# Incorrect evaluation, update the fail counter, then learn
self.fails += 1
# Learn the new entry
new_register = self.Register(entry, class_value)
new_register.hits += 1
self.descriptor.append(new_register)
learned = True
# Updates the frequency data
# TODO: is this the right place to do it?
if class_value not in frequency_data:
frequency_data[class_value] = 0
frequency_data[class_value] += 1
# Updates the processed instances counter
processed_instances += 1
# Size of the search space
# If we just appended a new entry, ignore it
descriptor_size = len(self.descriptor)
if learned:
descriptor_size -= 1
# Update all registers in range
i = 0
while i < descriptor_size:
register = self.descriptor[i]
# Similarity of the register used as the best "acceptable"
outer_similarity = best_acceptable[1]
similarity = similarity_table[register.id]
if similarity >= outer_similarity:
category = register.category
# Update the current register
if category == class_value:
register.hits += 1
else:
register.fails += 1
# discard factor
zf = params["zfd"]
zp = params["zpd"]
# Calculates the frequency interval (class)
p = frequency_data[category] / len(self.descriptor)
n = processed_instances
frequency_interval = self.interval(p, zf, n)
# Calculates the precision interval (instance)
n = register.hits + register.fails
p = register.hits / n
precision_interval = self.interval(p, zp, n)
if precision_interval["sup"] < frequency_interval["inf"]:
# Discard the instance
self.dropped.append(self.descriptor[i].entry)
del self.descriptor[i]
descriptor_size -= 1
frequency_data[category] -= 1
dropped_instances += 1
i -= 1
i += 1
print("Dropped: %s" % (dropped_instances))
# Transforms the descriptor into a KD-Tree
for i in range(len(self.descriptor)):
self.categories.append(self.descriptor[i].category)
self.descriptor[i] = self.descriptor[i].entry
self.descriptor = KDTree(np.array(self.descriptor))
def prepare(self, entry, class_index=-1):
return (utils.without_column(entry, class_index), entry[class_index])
def interval(self, p, z, n):
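        # Wilson score confidence interval for a proportion p observed over n
        # trials at confidence z:
        #   (p + z^2/(2n) -/+ z*sqrt(p*(1-p)/n + z^2/(4n^2))) / (1 + z^2/n)
        # IB3 accepts an instance when its accuracy interval lies entirely above
        # the class-frequency interval, and drops it in the opposite case.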
d = (1 + (z * z) / n)
f1 = p + (z * z) / (2 * n)
f2 = z * math.sqrt(p * (1 - p) / n + (z * z) / (4 * n * n))
return {
"inf": (f1 - f2) / d,
"sup": (f1 + f2) / d
}
class IBL4(Classifier):
class Register:
counter = 0
def __init__(self, entry, category):
self.id = self.counter
self.category = category
self.entry = entry
self.hits = 0
self.fails = 0
            type(self).counter += 1  # bump the shared class counter so ids stay unique
def __init__(self, training_set, class_index=-1, params={}):
super(IBL4, self).__init__()
self.on_classify = classify
self.dropped = []
frequency_data = {}
processed_instances = 0
dropped_instances = 0
accumulated_weights = []
normalized_weights = []
weights = []
# Adds a random instance to the descriptor
if len(self.descriptor) == 0:
random_entry = self.remove_one(training_set)
(entry, class_value) = self.prepare(random_entry, class_index)
# Sets initial values for the weights
num_attributes = len(entry)
for i in range(len(entry)):
accumulated_weights.append(0.01)
normalized_weights.append(0.01)
weights.append(1 / num_attributes)
frequency_data[class_value] = 1
processed_instances += 1
register = self.Register(entry, class_value)
register.hits += 1
self.descriptor.append(register)
training_size = len(training_set)
for external_entry in training_set:
(entry, class_value) = self.prepare(external_entry, class_index)
if class_value not in frequency_data:
frequency_data[class_value] = 0
# Searches for acceptable instances in the descriptor
best_acceptable = None
similarity_table = {}
for register in self.descriptor:
category = register.category
# Populates the similarity table
similarity = self.weighted_similarity(entry, register.entry, weights)
similarity_table[register.id] = similarity
# classifying acceptability factors
zf = params["zfa"]
zp = params["zpa"]
# Calculates the frequency interval (class)
p = frequency_data[category] / len(self.descriptor)
n = processed_instances
frequency_interval = self.interval(p, zf, n)
# Calculates the precision interval (instance)
n = register.hits + register.fails
p = register.hits / n
precision_interval = self.interval(p, zp, n)
if frequency_interval["sup"] < precision_interval["inf"]:
# Accept the instance
if not best_acceptable or best_acceptable[1] < similarity:
best_acceptable = (register, similarity)
if not best_acceptable and len(self.descriptor) > 0:
# No acceptable instances were found,
# so use a random register instead
random_register = self.pick_one(self.descriptor)
similarity = similarity_table[random_register.id]
best_acceptable = (random_register, similarity)
# Flag that indicates if we learned a new entry
learned = False
if best_acceptable and best_acceptable[0].category == class_value:
# Correct evaluation, simply update the hit counter
self.hits += 1
else:
# Incorrect evaluation, update the fail counter, then learn
self.fails += 1
# Learn the new entry
new_register = self.Register(entry, class_value)
new_register.hits += 1
self.descriptor.append(new_register)
learned = True
# Updates the frequency data
frequency_data[class_value] += 1
# Updates the processed instances counter
processed_instances += 1
# Size of the search space
# If we just appended a new entry, ignore it
descriptor_size = len(self.descriptor)
if learned:
descriptor_size -= 1
# Update all registers in range
i = 0
while i < descriptor_size:
register = self.descriptor[i]
# Similarity of the register used as the best "acceptable"
outer_similarity = best_acceptable[1]
similarity = similarity_table[register.id]
if similarity >= outer_similarity:
category = register.category
# Update the current register
if category == class_value:
register.hits += 1
else:
register.fails += 1
# discard factor
zf = params["zfd"]
zp = params["zpd"]
# Calculates the frequency interval (class)
p = frequency_data[category] / len(self.descriptor)
n = processed_instances
frequency_interval = self.interval(p, zf, n)
# Calculates the precision interval (instance)
n = register.hits + register.fails
p = register.hits / n
precision_interval = self.interval(p, zp, n)
if precision_interval["sup"] < frequency_interval["inf"]:
# Discard the instance
self.dropped.append(self.descriptor[i].entry)
del self.descriptor[i]
descriptor_size -= 1
frequency_data[category] -= 1
dropped_instances += 1
i -= 1
i += 1
# Iterates over the attributes, updating its weights
if len(self.descriptor) > 0:
reference = best_acceptable[0]
category = reference.category
for i in range(len(reference.entry)):
delta = abs(entry[i] - reference.entry[i])
lambd = max(frequency_data[class_value], frequency_data[category])
lambd /= len(self.descriptor)
complement = 1 - lambd
                    # IB4's weight update compares the class labels: attributes
                    # that agree are rewarded when the classes match, attributes
                    # that differ are rewarded when they do not.
                    if class_value == category:
accumulated_weights[i] += complement * (1 - delta)
else:
accumulated_weights[i] += complement * delta
normalized_weights[i] += complement
acc = accumulated_weights[i]
norm = normalized_weights[i]
weights[i] = max(0, acc / norm - 0.5)
print("Dropped: %s" % (dropped_instances))
print("Weights: %s" % weights)
for i in range(len(self.descriptor)):
self.categories.append(self.descriptor[i].category)
self.descriptor[i] = self.descriptor[i].entry
def weighted_similarity(self, first, second, weights):
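        # Negative weighted Euclidean distance, so a larger value means a more
        # similar pair of entries.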
result = 0
for i in range(len(first)):
result += (weights[i] * (first[i] - second[i])) ** 2
return -math.sqrt(result)
def prepare(self, entry, class_index=-1):
return (utils.without_column(entry, class_index), entry[class_index])
def interval(self, p, z, n):
d = (1 + (z * z) / n)
f1 = p + (z * z) / (2 * n)
f2 = z * math.sqrt(p * (1 - p) / n + (z * z) / (4 * n * n))
return {
"inf": (f1 - f2) / d,
"sup": (f1 + f2) / d
}
class IBL5(Classifier):
class Register:
counter = 0
def __init__(self, entry, category):
self.id = self.counter
self.category = category
self.entry = entry
self.hits = 0
self.fails = 0
            type(self).counter += 1  # bump the shared class counter so ids stay unique
def __init__(self, training_set, class_index=-1, params={}):
super(IBL5, self).__init__()
self.on_classify = classify
self.dropped = []
frequency_data = {}
processed_instances = 0
dropped_instances = 0
accumulated_weights = []
normalized_weights = []
weights = []
# Adds a random instance to the descriptor
if len(self.descriptor) == 0:
random_entry = self.remove_one(training_set)
(entry, class_value) = self.prepare(random_entry, class_index)
# Sets initial values for the weights
num_attributes = len(entry)
for i in range(len(entry)):
accumulated_weights.append(0.01)
normalized_weights.append(0.01)
weights.append(1 / num_attributes)
frequency_data[class_value] = 1
processed_instances += 1
register = self.Register(entry, class_value)
register.hits += 1
self.descriptor.append(register)
training_size = len(training_set)
for external_entry in training_set:
(entry, class_value) = self.prepare(external_entry, class_index)
if class_value not in frequency_data:
frequency_data[class_value] = 0
# Searches for acceptable instances in the descriptor
best_acceptable = None
similarity_table = {}
for register in self.descriptor:
category = register.category
# Populates the similarity table
similarity = self.weighted_similarity(entry, register.entry, weights)
similarity_table[register.id] = similarity
# classifying acceptability factors
zf = params["zfa"]
zp = params["zpa"]
# Calculates the frequency interval (class)
p = frequency_data[category] / len(self.descriptor)
n = processed_instances
frequency_interval = self.interval(p, zf, n)
# Calculates the precision interval (instance)
n = register.hits + register.fails
p = register.hits / n
precision_interval = self.interval(p, zp, n)
if frequency_interval["sup"] < precision_interval["inf"]:
# Accept the instance
if not best_acceptable or best_acceptable[1] < similarity:
best_acceptable = (register, similarity)
if not best_acceptable and len(self.descriptor) > 0:
# No acceptable instances were found,
# so use a random register instead
random_register = self.pick_one(self.descriptor)
similarity = similarity_table[random_register.id]
best_acceptable = (random_register, similarity)
# Flag that indicates if we learned a new entry
learned = False
if best_acceptable and best_acceptable[0].category == class_value:
# Correct evaluation, simply update the hit counter
self.hits += 1
else:
# Incorrect evaluation, update the fail counter, then learn
self.fails += 1
# Learn the new entry
new_register = self.Register(entry, class_value)
new_register.hits += 1
self.descriptor.append(new_register)
learned = True
# Updates the frequency data
frequency_data[class_value] += 1
# Updates the processed instances counter
processed_instances += 1
# Size of the search space
# If we just appended a new entry, ignore it
descriptor_size = len(self.descriptor)
if learned:
descriptor_size -= 1
# Update all registers in range
i = 0
while i < descriptor_size:
register = self.descriptor[i]
# Similarity of the register used as the best "acceptable"
outer_similarity = best_acceptable[1]
similarity = similarity_table[register.id]
if similarity >= outer_similarity:
category = register.category
# Update the current register
if category == class_value:
register.hits += 1
else:
register.fails += 1
# discard factor
zf = params["zfd"]
zp = params["zpd"]
# Calculates the frequency interval (class)
p = frequency_data[category] / len(self.descriptor)
n = processed_instances
frequency_interval = self.interval(p, zf, n)
# Calculates the precision interval (instance)
n = register.hits + register.fails
p = register.hits / n
precision_interval = self.interval(p, zp, n)
if precision_interval["sup"] < frequency_interval["inf"]:
# Discard the instance
self.dropped.append(self.descriptor[i].entry)
del self.descriptor[i]
descriptor_size -= 1
frequency_data[category] -= 1
dropped_instances += 1
i -= 1
i += 1
# Iterates over the attributes, updating its weights
if len(self.descriptor) > 0:
reference = best_acceptable[0]
category = reference.category
for i in range(len(reference.entry)):
if not self.both_known(entry[i], reference.entry[i]):
continue
delta = abs(entry[i] - reference.entry[i])
lambd = max(frequency_data[class_value], frequency_data[category])
lambd /= len(self.descriptor)
complement = 1 - lambd
                    if class_value == category:  # compare class labels, as in the IB4/IB5 weight update
accumulated_weights[i] += complement * (1 - delta)
else:
accumulated_weights[i] += complement * delta
normalized_weights[i] += complement
acc = accumulated_weights[i]
norm = normalized_weights[i]
weights[i] = max(0, acc / norm - 0.5)
print("Dropped: %s" % (dropped_instances))
for i in range(len(self.descriptor)):
self.categories.append(self.descriptor[i].category)
self.descriptor[i] = self.descriptor[i].entry
def weighted_similarity(self, first, second, weights):
result = 0
for i in range(len(first)):
if self.both_known(first[i], second[i]):
dif = first[i] - second[i]
else:
dif = 0
result += (weights[i] * dif) ** 2
return -math.sqrt(result)
def both_known(self, first, second):
return first != "" and second != ""
def prepare(self, entry, class_index=-1):
return (utils.without_column(entry, class_index), entry[class_index])
def interval(self, p, z, n):
d = (1 + (z * z) / n)
f1 = p + (z * z) / (2 * n)
f2 = z * math.sqrt(p * (1 - p) / n + (z * z) / (4 * n * n))
return {
"inf": (f1 - f2) / d,
"sup": (f1 + f2) / d
}
|
py | b406469234e77ee6614a7cc110df6d4210bbdf12 | """
Module: 'btree' on micropython-v1.10-esp32
"""
# MCU: {'ver': 'v1.10', 'build': '', 'platform': 'esp32', 'port': 'esp32', 'machine': 'ESP32 module with ESP32', 'release': '1.10.0', 'nodename': 'esp32', 'name': 'micropython', 'family': 'micropython', 'sysname': 'esp32', 'version': '1.10.0'}
# Stubber: 1.5.4
from typing import Any
DESC = 2 # type: int
INCL = 1 # type: int
def open(*args, **kwargs) -> Any:
...
|
py | b40647ce44df1e709c4c209daf69dfcb32582d8f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
import datetime
from django.db import models
class Question(models.Model):
title = models.CharField(max_length=50)
private = models.BooleanField(default=False)
user = models.ForeignKey('question_api.User')
class Answer(models.Model):
question = models.ForeignKey(Question, related_name='answers')
body = models.TextField(max_length=50)
user = models.ForeignKey('question_api.User')
class Tenant(models.Model):
name = models.CharField(max_length=30)
api_key = models.CharField(default=uuid.uuid4, max_length=100, editable=False)
def __unicode__(self):
return self.name
class User(models.Model):
name = models.CharField(max_length=30)
def __unicode__(self):
return self.name
class APICount(models.Model):
tenant = models.ForeignKey(Tenant)
count = models.IntegerField(default=0)
    # Pass the callable (not its result) so the default timestamp is computed
    # when a row is created rather than once at import time.
    next_timestamp = models.DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
return "Tenant: {} | Api Count: {}".format(
self.tenant.name, self.count) |
py | b40647ec31a853e132064ab74f5ac6f052999891 | from __future__ import absolute_import
from sentry.filters.browser_extensions import BrowserExtensionsFilter
from sentry.testutils import TestCase
class BrowserExtensionsFilterTest(TestCase):
filter_cls = BrowserExtensionsFilter
def apply_filter(self, data):
return self.filter_cls(self.project).test(data)
def get_mock_data(self, exc_value=None, exc_source=None):
return {
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'value': exc_value or 'undefined is not defined',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/foo.js'
},
{
'abs_path': exc_source or 'http://example.com/bar.js'
},
],
}
}
]
}
}
def test_bails_without_javascript_event(self):
data = {
'platform': 'python'
}
assert not self.apply_filter(data)
def test_filters_conduit_toolbar(self):
data = self.get_mock_data(exc_value='what does conduitPage even do')
assert self.apply_filter(data)
def test_filters_chrome_extensions(self):
data = self.get_mock_data(exc_source='chrome://my-extension/or/something')
assert self.apply_filter(data)
def test_does_not_filter_generic_data(self):
data = self.get_mock_data()
assert not self.apply_filter(data)
|
py | b406488327a75908bb569f1f19ee03cae7439ce6 | """ Nif Utilities, stores common code that is used across the code base"""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright © 2013, NIF File Format Library and Tools contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import bpy
from bpy_extras.io_utils import axis_conversion
import mathutils
from pyffi.formats.nif import NifFormat
from io_scene_niftools.utils.logging import NifLog
THETA_THRESHOLD_NEGY = 1.0e-9
THETA_THRESHOLD_NEGY_CLOSE = 1.0e-5
def set_bone_orientation(from_forward, from_up):
# if version in (0x14020007, ):
# skyrim
# from_forward = "Z"
# from_up = "Y"
# else:
# ZT2 and other old ones
# from_forward = "X"
# from_up = "Y"
global correction
global correction_inv
correction = axis_conversion(from_forward, from_up).to_4x4()
correction_inv = correction.inverted()
# set these from outside using set_bone_correction_from_version once we have a version number
correction = None
correction_inv = None
def import_keymat(rest_rot_inv, key_matrix):
"""Handles space conversions for imported keys """
return correction @ (rest_rot_inv @ key_matrix) @ correction_inv
def export_keymat(rest_rot, key_matrix, bone):
"""Handles space conversions for exported keys """
if bone:
return rest_rot @ (correction_inv @ key_matrix @ correction)
else:
return rest_rot @ key_matrix
def get_bind_matrix(bone):
"""Get a nif armature-space matrix from a blender bone. """
bind = correction @ correction_inv @ bone.matrix_local @ correction
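    # (correction @ correction_inv is the identity, so this is effectively
    #  bone.matrix_local @ correction; the expanded form mirrors the other
    #  space-conversion helpers in this module.)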
if bone.parent:
p_bind_restored = correction @ correction_inv @ bone.parent.matrix_local @ correction
bind = p_bind_restored.inverted() @ bind
return bind
def nif_bind_to_blender_bind(nif_armature_space_matrix):
return correction_inv @ correction @ nif_armature_space_matrix @ correction_inv
def import_matrix(n_block, relative_to=None):
"""Retrieves a n_block's transform matrix as a Mathutil.Matrix."""
return mathutils.Matrix(n_block.get_transform(relative_to).as_list()).transposed()
def decompose_srt(b_matrix):
"""Decompose Blender transform matrix as a scale, 4x4 rotation matrix, and translation vector."""
# get matrix components
trans_vec, rot_quat, scale_vec = b_matrix.decompose()
rotmat = rot_quat.to_matrix()
# todo [armature] negative scale is not generated on armature end
# no need to run costly operations here for now
# and fix the sign of scale
# if b_matrix.determinant() < 0:
# scale_vec.negate()
# only uniform scaling allow rather large error to accommodate some nifs
if abs(scale_vec[0] - scale_vec[1]) + abs(scale_vec[1] - scale_vec[2]) > 0.02:
NifLog.warn("Non-uniform scaling not supported. Workaround: apply size and rotation (CTRL-A).")
return scale_vec[0], rotmat.to_4x4(), trans_vec
def get_armature():
"""Get an armature. If there is more than one armature in the scene and some armatures are selected, return the first of the selected armatures. """
src_armatures = [ob for ob in bpy.data.objects if type(ob.data) == bpy.types.Armature]
# do we have armatures?
if src_armatures:
# see if one of these is selected -> get only that one
if len(src_armatures) > 1:
sel_armatures = [ob for ob in src_armatures if ob.select_get()]
if sel_armatures:
return sel_armatures[0]
return src_armatures[0]
def get_object_bind(b_obj):
"""Get the bind matrix of a blender object.
Returns the final NIF matrix for the given blender object.
Blender space and axes order are corrected for the NIF.
Returns a 4x4 mathutils.Matrix()
"""
if isinstance(b_obj, bpy.types.Bone):
return get_bind_matrix(b_obj)
elif isinstance(b_obj, bpy.types.Object):
# TODO [armature] Move to armaturehelper
# if there is a bone parent then the object is parented then get the matrix relative to the bone parent head
if b_obj.parent_bone:
# get parent bone
parent_bone = b_obj.parent.data.bones[b_obj.parent_bone]
# undo what was done on import
mpi = nif_bind_to_blender_bind(b_obj.matrix_parent_inverse).inverted()
mpi.translation.y -= parent_bone.length
return mpi.inverted() @ b_obj.matrix_basis
# just get the local matrix
else:
return b_obj.matrix_local
# Nonetype, maybe other weird stuff
return mathutils.Matrix()
def find_property(n_block, property_type):
"""Find a property."""
if hasattr(n_block, "properties"):
for prop in n_block.properties:
if isinstance(prop, property_type):
return prop
if hasattr(n_block, "bs_properties"):
for prop in n_block.bs_properties:
if isinstance(prop, property_type):
return prop
return None
def find_controller(n_block, controller_type):
"""Find a controller."""
ctrl = n_block.controller
while ctrl:
if isinstance(ctrl, controller_type):
break
ctrl = ctrl.next_controller
return ctrl
def find_extra(n_block, extratype):
# TODO: 3.0 - Optimise
"""Find extra data."""
# pre-10.x.x.x system: extra data chain
extra = n_block.extra_data
while extra:
if isinstance(extra, extratype):
break
extra = extra.next_extra_data
if extra:
return extra
# post-10.x.x.x system: extra data list
for extra in n_block.extra_data_list:
if isinstance(extra, extratype):
return extra
return None
def set_object_matrix(b_obj, block):
"""Set a blender object's transform matrix to a NIF object's transformation matrix in rest pose."""
block.set_transform(get_object_matrix(b_obj))
def get_object_matrix(b_obj):
"""Get a blender object's matrix as NifFormat.Matrix44"""
return mathutils_to_nifformat_matrix(get_object_bind(b_obj))
def set_b_matrix_to_n_block(b_matrix, block):
"""Set a blender matrix to a NIF object's transformation matrix in rest pose."""
# TODO [object] maybe favor this over the above two methods for more flexibility and transparency?
block.set_transform(mathutils_to_nifformat_matrix(b_matrix))
def mathutils_to_nifformat_matrix(b_matrix):
"""Convert a blender matrix to a NifFormat.Matrix44"""
# transpose to swap columns for rows so we can use pyffi's set_rows() directly
# instead of setting every single value manually
n_matrix = NifFormat.Matrix44()
n_matrix.set_rows(*b_matrix.transposed())
return n_matrix
|
py | b40648fe8eef809c24fa2afdc425a6c0b63f18e7 | import altair as alt
from vega_datasets import data
from rpcjs import Dashboard, Page, set_attribute
import rpcjs.elements as html
import rpcjs.binded as forms
source = data.cars()
columns = list(source.columns)
class MyDynamicPage(Page):
def routes(self):
return '/'
def __init__(self):
self.title = 'MyDynamicPage'
self.xlabel = None
self.ylabel = None
def make_form(self):
"""Make a simple form so the user can input the x and y axis"""
self.xlabel, xlabel_html = forms.select_dropdown(columns, callback=self.make_graph)
self.ylabel, ylabel_html = forms.select_dropdown(columns, callback=self.make_graph)
form = html.div(
html.div(
html.header('X axis', level=5),
xlabel_html),
html.div(
html.header('Y axis', level=5),
ylabel_html))
return form
def make_graph(self):
"""Generate the graph when all the inputs are ready"""
xlabel = self.xlabel.get()
ylabel = self.ylabel.get()
if xlabel is None or ylabel is None:
return
chart = alt.Chart(source).mark_circle().encode(
alt.X(xlabel, type='quantitative'),
alt.Y(ylabel, type='quantitative'),
color='Origin:N'
).properties(
width=500,
height=500
).interactive()
# send our graph back to the page
set_attribute('graph_id', 'srcdoc', html.altair_plot(chart, with_iframe=False))
def main(self):
return html.div(
self.make_form(),
# where our graph will be populated
html.iframe("", id='graph_id'))
if __name__ == '__main__':
# go to http://127.0.0.1:5000/
with Dashboard(__name__) as dash:
dash.add_page(MyDynamicPage())
dash.run()
|
py | b4064ab903386b5a23fc521a5b65a60a66f41eb8 | import _plotly_utils.basevalidators
class SelectedValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="selected", parent_name="scatter", **kwargs):
super(SelectedValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Selected"),
data_docs=kwargs.pop(
"data_docs",
"""
marker
:class:`plotly.graph_objects.scatter.selected.M
arker` instance or dict with compatible
properties
textfont
:class:`plotly.graph_objects.scatter.selected.T
extfont` instance or dict with compatible
properties
""",
),
**kwargs,
)
|
py | b4064b7ed7995e9008287a036dcaaaf0f1f103ee | import memory_baza as baza
def wybierz_symbol(znak):
znak = list(znak)
try:
r = znak[0].upper()
c = znak[1]
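        # Convert a coordinate such as "A1" into an index into the symbol list
        # (letter -> position within a block of ILE_RZEDOW entries, digit -> block).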
id_znak = ord(r) - 65 + baza.ILE_RZEDOW * (int(c) - 1)
symbol = baza.lista_znakow[id_znak]
wynik = [id_znak, symbol]
if obecna_lista[wynik[0]] != '*':
raise ValueError
else:
obecna_lista[wynik[0]] = wynik[1]
return wynik
except (IndexError, ValueError):
print('Błędne koordynaty lub ten symbol jest już odkryty!\n')
pass
def podaj_koordynaty(wynik=None, licznik_wywolan=0):
while wynik == None:
if licznik_wywolan == 0:
znak = input('Którą \'gwiazdkę\' mam odkryć jako pierwszą? (np.A1): ')
else:
znak = input('Podaj drugą \'gwiazdkę\': ')
wynik = wybierz_symbol(znak)
return wynik
# GAMEPLAY:
licznik_prob = 10
print(f'Masz {licznik_prob} prób(y). Powodzenia! :)')
obecna_lista = ['*'] * baza.ILE_RZEDOW * baza.ILE_KOLUMN
while licznik_prob > 0:
baza.obecna_tablica_wyswietl(obecna_lista)
wynik1 = podaj_koordynaty()
baza.obecna_tablica_wyswietl(obecna_lista)
wynik2 = podaj_koordynaty(licznik_wywolan=1)
if wynik1[0] == wynik2[0]:
print('Podałeś te same koordynaty! Graj dalej.')
print(f'Pozostałe próby: {licznik_prob}\n')
obecna_lista[wynik1[0]] = '*'
continue
if wynik1[1] == wynik2[1]:
if '*' not in obecna_lista:
print('#' * 20, 'GRATULUJĘ WYGRANEJ!!!', '#' * 20)
print('Twoja tablica:')
baza.obecna_tablica_wyswietl(obecna_lista)
exit()
else:
print('Brawo! Zgaduj dalej :)')
else:
licznik_prob -= 1
baza.obecna_tablica_wyswietl(obecna_lista)
print('\nNiestety, niepoprawnie. Zapamiętaj ułożenie symboli.')
reakcja = input('Naciśnij ENTER jeśli mogę przejść dalej.')
baza.nowy_ekran()
obecna_lista[wynik1[0]] = '*'
obecna_lista[wynik2[0]] = '*'
print(f'Pozostałe próby: {licznik_prob}')
print('Niestety skończyły Ci się próby :(')
print('Tablica wyglądała następująco:')
t = baza.stworz_tablice(baza.lista_znakow, baza.ILE_RZEDOW, baza.ILE_KOLUMN)
baza.wyswietl_tablice(t, baza.ILE_RZEDOW, baza.ILE_KOLUMN)
|
py | b4064c2113ce160e78464fb7456764b8c9b9e1fc | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import dataproc_v1beta2
from google.cloud.dataproc_v1beta2.proto import workflow_templates_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestWorkflowTemplateServiceClient(object):
def test_create_workflow_template(self):
# Setup Expected Response
id_ = "id3355"
name = "name3373707"
version = 351608024
expected_response = {"id": id_, "name": name, "version": version}
expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup Request
parent = client.region_path("[PROJECT]", "[REGION]")
template = {}
response = client.create_workflow_template(parent, template)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = workflow_templates_pb2.CreateWorkflowTemplateRequest(
parent=parent, template=template
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_workflow_template_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup request
parent = client.region_path("[PROJECT]", "[REGION]")
template = {}
with pytest.raises(CustomException):
client.create_workflow_template(parent, template)
def test_get_workflow_template(self):
# Setup Expected Response
id_ = "id3355"
name_2 = "name2-1052831874"
version = 351608024
expected_response = {"id": id_, "name": name_2, "version": version}
expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup Request
name = client.workflow_template_path(
"[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]"
)
response = client.get_workflow_template(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = workflow_templates_pb2.GetWorkflowTemplateRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_workflow_template_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup request
name = client.workflow_template_path(
"[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]"
)
with pytest.raises(CustomException):
client.get_workflow_template(name)
def test_instantiate_workflow_template(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_instantiate_workflow_template", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup Request
name = client.workflow_template_path(
"[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]"
)
response = client.instantiate_workflow_template(name)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = workflow_templates_pb2.InstantiateWorkflowTemplateRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_instantiate_workflow_template_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_instantiate_workflow_template_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup Request
name = client.workflow_template_path(
"[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]"
)
response = client.instantiate_workflow_template(name)
exception = response.exception()
assert exception.errors[0] == error
def test_instantiate_inline_workflow_template(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_instantiate_inline_workflow_template", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup Request
parent = client.region_path("[PROJECT]", "[REGION]")
template = {}
response = client.instantiate_inline_workflow_template(parent, template)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = workflow_templates_pb2.InstantiateInlineWorkflowTemplateRequest(
parent=parent, template=template
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_instantiate_inline_workflow_template_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_instantiate_inline_workflow_template_exception",
done=True,
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup Request
parent = client.region_path("[PROJECT]", "[REGION]")
template = {}
response = client.instantiate_inline_workflow_template(parent, template)
exception = response.exception()
assert exception.errors[0] == error
def test_update_workflow_template(self):
# Setup Expected Response
id_ = "id3355"
name = "name3373707"
version = 351608024
expected_response = {"id": id_, "name": name, "version": version}
expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup Request
template = {}
response = client.update_workflow_template(template)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = workflow_templates_pb2.UpdateWorkflowTemplateRequest(
template=template
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_workflow_template_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup request
template = {}
with pytest.raises(CustomException):
client.update_workflow_template(template)
def test_list_workflow_templates(self):
# Setup Expected Response
next_page_token = ""
templates_element = {}
templates = [templates_element]
expected_response = {"next_page_token": next_page_token, "templates": templates}
expected_response = workflow_templates_pb2.ListWorkflowTemplatesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup Request
parent = client.region_path("[PROJECT]", "[REGION]")
paged_list_response = client.list_workflow_templates(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.templates[0] == resources[0]
assert len(channel.requests) == 1
expected_request = workflow_templates_pb2.ListWorkflowTemplatesRequest(
parent=parent
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_workflow_templates_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup request
parent = client.region_path("[PROJECT]", "[REGION]")
paged_list_response = client.list_workflow_templates(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_delete_workflow_template(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup Request
name = client.workflow_template_path(
"[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]"
)
client.delete_workflow_template(name)
assert len(channel.requests) == 1
expected_request = workflow_templates_pb2.DeleteWorkflowTemplateRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_workflow_template_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.WorkflowTemplateServiceClient()
# Setup request
name = client.workflow_template_path(
"[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]"
)
with pytest.raises(CustomException):
client.delete_workflow_template(name)
|
py | b4064c49c623284fcc4917657bba193188d209fd | from __future__ import absolute_import, unicode_literals
# `None` and empty string aren't valid JSON but it's safer to include them as potential empty values.
EMPTY_SERIALIZED_JSON_VALUES = (None, '', '[]', '{}')
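
# Illustrative (hypothetical) usage: treat a stored value as empty when it is
# one of the sentinels above, e.g.
#   if raw_value in EMPTY_SERIALIZED_JSON_VALUES:
#       raw_value = '{}'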
|
py | b4064df6783d48b7e64b807af9a0950c983d7a8e | """
*
* Author: Juarez Paulino(coderemite)
* Email: [email protected]
*
"""
n=int(input())
print((n-2)**2) |
py | b4064fc9dde758cf2b2f5e7f00078d00601db9bc | # SPDX-License-Identifier: MIT
# Python imports
import datetime
import os
import sys
# Django imports
import django
# add the repositories root directory to the Python path
sys.path.insert(0, os.path.abspath("../../"))
import calingen # noqa: isort:skip
# for `autodoc`, Django has to be setup (with a minimal setup)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.util.settings_test")
django.setup()
# ### Project Information
project = calingen.__app_name__
author = calingen.__author__
copyright = "{}, {}".format(datetime.datetime.now().year, author)
version = calingen.__version__
release = version
# ### General Configuration
# https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-exclude_patterns
exclude_patterns = [] # type: ignore[var-annotated]
# activate extensions
extensions = [
# automatically insert labels for section titles
"sphinx.ext.autosectionlabel",
# make links to other, often referenced, sites easier
"sphinx.ext.extlinks",
# make graphviz available (needs system installation of graphviz)
"sphinx.ext.graphviz",
# provide links to other, sphinx-generated, documentation
"sphinx.ext.intersphinx",
# allow docstrings to be written in NumPy or Google style
"sphinx.ext.napoleon",
# automatic API documentation using the docstrings
# HINT: Can be (temporarily) disabled to speed up build time!
"autoapi.extension",
# use the RTD theme
# configuration is provided in the HTML Output section
"sphinx_rtd_theme",
]
# "index" is already the default (since Sphinx 2.0), but better be explicit.
master_doc = "index"
modindex_common_prefix = ["calingen."]
# https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-templates_path
templates_path = ["_templates"]
# https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-source_suffix
source_suffix = {
".rst": "restructuredtext",
# ".txt": "restructuredtext",
}
# ### Extension Configuration
# ##### autosectionlabel
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2
# ##### autoapi
autoapi_type = "python"
autoapi_dirs = ["../../calingen"]
# may be set to `False` when switching to manual directives, which is **hopefully* not necessary
autoapi_generate_api_docs = True
# https://sphinx-autoapi.readthedocs.io/en/latest/reference/config.html#confval-autoapi_options
# autoapi_options = ["members"]
# https://sphinx-autoapi.readthedocs.io/en/latest/reference/config.html#confval-autoapi_root
autoapi_root = "api"
autoapi_member_order = "groupwise"
# set the output format to "svg" to allow scaling; "png" is the alternative and default
graphviz_output_format = "svg"
# ##### intersphinx
django_version = ".".join(map(str, django.VERSION[0:2])) # type: ignore[misc]
python_version = ".".join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
"python": ("https://docs.python.org/" + python_version, None),
"django": (
"https://docs.djangoproject.com/en/{}/".format(django_version),
"https://docs.djangoproject.com/en/{}/_objects/".format(django_version),
),
# if the doc is hosted on RTD, the following should work out of the box:
# "celery": ("https://celery.readthedocs.org/en/latest/", None),
}
# Python's docs don't change every week.
intersphinx_cache_limit = 90 # days
def _add_django_roles(app): # type: ignore[no-untyped-def]
"""Adds Django-specific roles to be accessible while linking to Django's documentation.
The roles are actually fetched from Django's own sphinx extension [1]_.
.. [1] https://github.com/django/django/blob/master/docs/_ext/djangodocs.py
"""
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_crossref_type(
directivename="templatetag",
rolename="ttag",
indextemplate="pair: %s; template tag",
)
app.add_crossref_type(
directivename="templatefilter",
rolename="tfilter",
indextemplate="pair: %s; template filter",
)
app.add_crossref_type(
directivename="fieldlookup",
rolename="lookup",
indextemplate="pair: %s; field lookup type",
)
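# Illustrative only: with the cross-reference types above registered, the reST
# sources can use the roles ":setting:", ":ttag:", ":tfilter:" and ":lookup:"
# (e.g. ":ttag:`now`") when cross-referencing Django concepts.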
# ##### extlinks
extlinks = {
# will show commit's SHA1
"commit": ("https://github.com/mischback/django-calingen/commit/%s", ""),
# will show "issue [number]"
"issue": ("https://github.com/mischback/django-calingen/issues/%s", "issue "),
# A file or directory. GitHub redirects from blob to tree if needed.
# will show file/path relative to root-directory of the repository
"source": ("https://github.com/mischback/django-calingen/blob/development/%s", ""),
# also available by intersphinx :django:doc:
"djangodoc": ("https://docs.djangoproject.com/en/{}/%s".format(django_version), ""),
# also available by intersphinx (most likely as simple as specifying the full Python path)
"djangoapi": (
"https://docs.djangoproject.com/en/{}/ref/%s".format(django_version),
"",
),
# will show "Wikipedia: [title]"
"wiki": ("https://en.wikipedia.org/wiki/%s", "Wikipedia: "),
}
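# Illustrative only: with the definitions above, the reST sources can write e.g.
# ":issue:`42`" (rendered as "issue 42", linked to the GitHub issue tracker) or
# ":wiki:`ISO_8601`" (rendered as "Wikipedia: ISO_8601").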
# ### HTML Output
# set the theme
html_theme = "sphinx_rtd_theme"
html_theme_options = {
# 'canonical_url': 'http://django-calingen.readthedocs.io', # adjust to real url
# 'analytics_id': 'UA-XXXXXXX-1', # Provided by Google in your dashboard
# 'logo_only': False,
# 'display_version': True,
# 'prev_next_buttons_location': 'bottom',
"style_external_links": True, # default: False
# 'vcs_pageview_mode': '',
# 'style_nav_header_background': 'white',
# Toc options
# 'collapse_navigation': True,
# 'sticky_navigation': True,
# 'navigation_depth': 4, # might be decreased?
# 'includehidden': True,
# 'titles_only': False
}
# https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-html_static_path
html_static_path = ["_static"]
# provide a logo (max 200px width)
# html_logo = ""
# ### Extension Magic
def setup(app): # type: ignore[no-untyped-def]
"""Let this configuration be its own extension."""
_add_django_roles(app)
|
py | b40650d02c51109e1ab3c2ab3ad57d1ac8e3fcd1 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 6 03:06:37 2017
@author: Ben
"""
# Copyright (c) 2005-2006, California Institute of Technology
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Author: Andrew Straw
import UniversalLibrary as UL
import numpy
BoardNum = 0
UDStat = 0
Gain = UL.BIP5VOLTS
LowChan = 0
HighChan = 0
Count = 20
Rate = 3125
Options = UL.CONVERTDATA + UL.BACKGROUND + UL.SINGLEIO
ADData = numpy.zeros((Count,), dtype=numpy.int16)
Rate = UL.cbAInScan(BoardNum, LowChan, HighChan, Count,
Rate, Gain, ADData, Options)
Status = UL.RUNNING
CurCount = 0
CurIndex = 0
# Poll the board status until the background scan completes.
while Status == UL.RUNNING:
Status, CurCount, CurIndex = UL.cbGetStatus(BoardNum, Status, CurCount, CurIndex, UL.AIFUNCTION) |
py | b4065431001796a96870eb292402b0fa376d4d17 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_config_map_key_selector import V1ConfigMapKeySelector # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1ConfigMapKeySelector(unittest.TestCase):
"""V1ConfigMapKeySelector unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1ConfigMapKeySelector(self):
"""Test V1ConfigMapKeySelector"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_config_map_key_selector.V1ConfigMapKeySelector() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b40654d4a5fb9b13bd8cf918b4f85479f52e66a2 | import functools
from typing import Callable, Dict, TYPE_CHECKING
import torch
import torch.distributed as dist
import torch.distributed._shard.sharding_spec as shard_spec
from torch.distributed import distributed_c10d
from torch.distributed.nn.functional import (
reduce_scatter,
)
from torch.distributed._shard.common_op_utils import _register_default_op
from torch.distributed._shard.op_registry_utils import _decorator_func
from torch.utils._pytree import tree_map
if TYPE_CHECKING:
# Only include ShardedTensor when do type checking, exclude it
# from run-time to resolve circular dependency.
from torch.distributed._shard.sharded_tensor import ShardedTensor
# Custom PartialTensor ops
_PARTIAL_TENSOR_OPS: Dict[Callable, Callable] = {}
def _custom_partial_tensor_op(func):
"""
Decorate for custom partial tensor op
Args:
func(Callable): Torch function for which we want to provide a PartialTensor
implementation (ex: torch.nn.functional.linear)
"""
return functools.partial(
_decorator_func,
op=func,
op_table=_PARTIAL_TENSOR_OPS
)
class _PartialTensor(torch.Tensor):
"""
PartialTensor is an abstraction to represent Tensors that need
aggregation across multiple devices and multiple processes.
PartialTensor is initialized in an SPMD like fashion where each rank
initializes the PartialTensor. The PartialTensor object on each rank
then only stores the local partial shard, process group and the
aggregation way to get a full tensor.
    PartialTensor doesn't provide any Tensor-like operations but is a
wrapper providing the Tensor representing the local partial shard.
We assume the size of each local tensor to be exactly the same.
Users can apply custom distributed sharded computations on top of
this primitive.
Args:
local_partial_shard (Tensor): Partial result stored across ranks.
process_group (ProcessGroup): The process group to aggregate on.
reduce_op (distributed_c10d.ReduceOp): Way to aggregate the partial result.
Default: ``distributed_c10d.ReduceOp.SUM``
Examples:
>>> # All tensors below are of torch.int64 type.
>>> # We have 2 process groups, 2 ranks.
>>> tensor = torch.arange(2, dtype=torch.int64) + 1 + 2 * rank
>>> tensor = torch.cat([tensor, tensor + 2])
>>> tensor
tensor([1, 2, 3, 4]) # Rank 0
tensor([3, 4, 5, 6]) # Rank 1
>>> partial_tensor = _PartialTensor(tensor, distributed_c10d.ReduceOp.MAX)
>>> sharding_dim = 0
>>> collect_spec = shard_spec.ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
],
)
>>> complete_tensor = partial_tensor.reshard(collect_spec)
>>> complete_tensor
ShardedTensor(
ShardedTensorMetadata(
shards_metadata=[
ShardMetadata(shard_offsets=[0], shard_sizes=[2], placement=rank:0/cuda:0),
ShardMetadata(shard_offsets=[2], shard_sizes=[2], placement=rank:1/cuda:1)],
size=torch.Size([4])
)
>>> complete_tensor.local_tensor()
tensor([3, 4]) # Rank 0
tensor([5, 6]) # Rank 1
>>> # All tensors below are of torch.cfloat type.
>>> # We have 2 process groups, 2 ranks.
>>> tensor = torch.tensor([1, 2]) + 2 * rank
>>> tensor = torch.cat([tensor, tensor + 2])
>>> tensor
tensor([1, 2, 3, 4]) # Rank 0
tensor([3, 4, 5, 6]) # Rank 1
>>> partial_tensor = _PartialTensor(tensor)
>>> complete_tensor = partial_tensor.reshard(collect_spec)
>>> complete_tensor
ShardedTensor(
ShardedTensorMetadata(
shards_metadata=[
ShardMetadata(shard_offsets=[0], shard_sizes=[2], placement=rank:0/cuda:0),
ShardMetadata(shard_offsets=[2], shard_sizes=[2], placement=rank:1/cuda:1)],
size=torch.Size([4])
)
>>> complete_tensor.local_tensor()
tensor([4, 6]) # Rank 0
tensor([8, 10]) # Rank 1
"""
_process_group: distributed_c10d.ProcessGroup
_local_shard: torch.Tensor
_reduce_op: distributed_c10d.ReduceOp
__slots__ = ["_process_group", "_local_shard", "_reduce_op"]
def __new__(cls, local_shard, process_group=None, reduce_op=distributed_c10d.ReduceOp.SUM):
r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
cls,
local_shard.size(),
dtype=local_shard.dtype,
layout=local_shard.layout,
pin_memory=local_shard.is_pinned(),
requires_grad=local_shard.requires_grad) # type: ignore[arg-type]
r._process_group = ( # type: ignore[attr-defined]
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
r._reduce_op = reduce_op
r._local_shard = local_shard
return r
def __post_init__(self):
if not isinstance(self._reduce_op, distributed_c10d.ReduceOp):
raise ValueError(
"reduce_op needs to be a member of distributed_c10d.ReduceOp."
)
def reshard(self, resharding_spec: shard_spec.ShardingSpec) -> "ShardedTensor":
"""
The reshard happens in two steps logically:
1. Aggregate all the shards of the partial tensor.
2. Shard this tensor according to the provided spec.
        In reality, for the sake of performance, we consolidate all partial tensors
        across multiple ranks and convert to a sharded tensor in one step.
Args:
resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
The specification describing how we reshard the aggregated local result.
Returns:
A :class:`ShardedTensor` filled with local aggregated result.
"""
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
if not isinstance(resharding_spec, shard_spec.ChunkShardingSpec):
raise NotImplementedError("Only ChunkShardingSpec supported for reshard.")
if self._local_shard.is_complex():
raise NotImplementedError("Only real partial tensor supported for reshard.")
sharding_dim = int(resharding_spec.dim) # type: ignore[attr-defined]
chunk_mode_res = self._local_shard.size(sharding_dim) % self._process_group.size()
local_shard = self._local_shard
# Add padding when the size is not divisible by the world size.
if chunk_mode_res != 0:
padding = [0] * (local_shard.dim() * 2)
padding[-1] = self._process_group.size() - chunk_mode_res
local_shard = torch.nn.functional.pad(
local_shard,
tuple(padding),
"constant",
0,
)
current_rank = dist.get_rank(self._process_group) # type: ignore[attr-defined]
rank_idx = None
rearrange_local_shards = False
indices = [0] * self._process_group.size()
for idx, placement in enumerate(resharding_spec.placements): # type: ignore[attr-defined]
if placement.rank() == current_rank: # type: ignore[index, union-attr]
rank_idx = idx # type: ignore[attr-defined]
if placement.rank() != idx: # type: ignore[index, union-attr]
rearrange_local_shards = True
indices[placement.rank()] = idx # type: ignore[index, union-attr]
local_shards = local_shard.chunk(self._process_group.size(), dim=sharding_dim)
if rearrange_local_shards:
# Need to re-arrange original shard_dim of output_tensor_list.
local_shards = [local_shards[idx] for idx in indices] # type: ignore[call-overload]
local_result = reduce_scatter(
torch.empty_like(local_shards[0]), list(local_shards), op=self._reduce_op
)
sharded_tensor_size = self._local_shard.size()
# Remove padding when the size is not divisible by the world size.
if chunk_mode_res != 0:
uneven_local_shards = self._local_shard.chunk(
self._process_group.size(), dim=sharding_dim
)
expected_size = uneven_local_shards[rank_idx].size() # type: ignore[index]
if local_result.size() != expected_size:
local_result = local_result.narrow(
sharding_dim,
0,
expected_size[sharding_dim],
)
return ShardedTensor._init_from_local_tensor(
local_result,
resharding_spec,
sharded_tensor_size,
process_group=self._process_group,
)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
# Find process_group
process_group = None
def find_process_group(e):
nonlocal process_group
if process_group is None and isinstance(e, _PartialTensor):
process_group = e._process_group
tree_map(find_process_group, args)
tree_map(find_process_group, kwargs)
if func in _PARTIAL_TENSOR_OPS:
return _PARTIAL_TENSOR_OPS[func](types, args, kwargs, process_group)
# Need to disable all dispatch to print args and kwargs appropriately.
guard = torch._C._DisableTorchDispatch() # type: ignore[attr-defined]
try:
with torch._C.DisableTorchFunction():
raise RuntimeError(
f"torch function '{func.__name__}', with args: {args} and "
f"kwargs: {kwargs} not supported for PartialTensor!")
finally:
del guard
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
raise RuntimeError(
f"A {cls.__name__} object is being used from c++ "
f"while calling {func.__module__}.{func.__name__} "
"but the there is no custom __torch_dispatch__ implementation for it."
)
def __repr__(self):
return f"PartialTensor({super(_PartialTensor, self).__repr__()})"
def _transpose_impl(types, args=(), kwargs=None, process_group=None):
partial_tensor = args[0]
input = partial_tensor._local_shard
dim0 = args[1]
dim1 = args[2]
return _PartialTensor(
torch.transpose(input, dim0, dim1),
process_group,
partial_tensor._reduce_op
)
@_custom_partial_tensor_op(torch.Tensor.transpose)
def partial_transpose(types, args=(), kwargs=None, process_group=None):
return _transpose_impl(types, args, kwargs, process_group)
@_custom_partial_tensor_op(torch.transpose)
def partial_torch_transpose(types, args=(), kwargs=None, process_group=None):
return _transpose_impl(types, args, kwargs, process_group)
@_custom_partial_tensor_op(torch.cat)
def partial_cat(types, args=(), kwargs=None, process_group=None):
input_list = args[0]
if len(input_list) == 0:
raise RuntimeError('Empty list of tensors to torch.cat!')
local_shards = []
for idx, input in enumerate(input_list):
if not isinstance(input, _PartialTensor):
raise RuntimeError('All inputs need to be an instance of _PartialTensor')
if idx == 0:
reduce_op = input._reduce_op
elif reduce_op != input._reduce_op:
raise RuntimeError(
'All _PartialTensor reduce_ops need to be the same, found: '
                f'{reduce_op} and {input._reduce_op}'
)
local_shards.append(input._local_shard)
if kwargs is None:
dim = 0
else:
if 'out' in kwargs:
raise RuntimeError('"out" kwarg is not supported!')
dim = kwargs['dim'] if 'dim' in kwargs else 0
return _PartialTensor(torch.cat(local_shards, dim), process_group, input._reduce_op)
# Tensor properties access
_register_default_op(torch.Tensor.requires_grad.__get__, _custom_partial_tensor_op) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.shape.__get__, _custom_partial_tensor_op) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.dtype.__get__, _custom_partial_tensor_op) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.layout.__get__, _custom_partial_tensor_op) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.size, _custom_partial_tensor_op)
_register_default_op(torch.Tensor.dim, _custom_partial_tensor_op)
_register_default_op(torch.Tensor.ndim.__get__, _custom_partial_tensor_op) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.is_contiguous, _custom_partial_tensor_op)
_register_default_op(torch.Tensor.contiguous, _custom_partial_tensor_op)
|
py | b40655d6f90c29537a7590b25d47704dab2ac660 | #!L:\006ProgrammerLearningWorkPlace\Flask\GitHub\PersonalBlog\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
)
|
py | b40656619ff0761286276ddc782e6b84f9372f19 | import sys
first = True
with open(sys.argv[1]) as f:
for line in f:
line = line.split('\n')[0].split(' ')
gen = line[0]
param1 = line[1:4]
trg = line[4]
if (not first):
print str(gen)+' '+str((-float(param1[0])+1000))+' '+str((-float(param1[1])+1000))+' '+str((-float(param1[2])+1000))+' '+str(trg)
first = False
# k = 0
# for i in line:
# print str(k)+' '+i
# k += 1;
#
|
py | b406571076f8c6859a197e964190452e3254936d | from vis_det.optimize.loss import layout_vis_loss
from vis_det.optimize.optimize import layout_optimize
__all__ = list(globals().keys())
|
py | b406592b744a92b5789ea2d928491e4103cd0306 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['TemplateInitArgs', 'Template']
@pulumi.input_type
class TemplateInitArgs:
def __init__(__self__, *,
template: Optional[pulumi.Input['TemplateArgs']] = None):
"""
The set of arguments for constructing a Template resource.
"""
if template is not None:
pulumi.set(__self__, "template", template)
@property
@pulumi.getter
def template(self) -> Optional[pulumi.Input['TemplateArgs']]:
return pulumi.get(self, "template")
@template.setter
def template(self, value: Optional[pulumi.Input['TemplateArgs']]):
pulumi.set(self, "template", value)
warnings.warn("""Template is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
class Template(pulumi.CustomResource):
warnings.warn("""Template is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
template: Optional[pulumi.Input[pulumi.InputType['TemplateArgs']]] = None,
__props__=None):
"""
Resource Type definition for AWS::SES::Template
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[TemplateInitArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::SES::Template
:param str resource_name: The name of the resource.
:param TemplateInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TemplateInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
template: Optional[pulumi.Input[pulumi.InputType['TemplateArgs']]] = None,
__props__=None):
pulumi.log.warn("""Template is deprecated: Template is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""")
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TemplateInitArgs.__new__(TemplateInitArgs)
__props__.__dict__["template"] = template
super(Template, __self__).__init__(
'aws-native:ses:Template',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Template':
"""
Get an existing Template resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = TemplateInitArgs.__new__(TemplateInitArgs)
__props__.__dict__["template"] = None
return Template(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def template(self) -> pulumi.Output[Optional['outputs.Template']]:
return pulumi.get(self, "template")
|
py | b406596daa1bbdc6db483afbdbb0bf30f7e39ded | # Copyright ClusterHQ Inc. See LICENSE file for details.
"""Utilities to help with unit and functional testing of ssh."""
import os
from operator import setitem, delitem
from signal import SIGKILL
from unittest import skipIf
from subprocess import check_call, check_output
from zope.interface import implementer
from ipaddr import IPAddress
from twisted.python.components import registerAdapter
from twisted.internet import reactor
from twisted.cred.portal import IRealm, Portal
try:
from twisted.conch.ssh.keys import Key
from twisted.conch.checkers import SSHPublicKeyDatabase
from twisted.conch.interfaces import ISession
from twisted.conch.openssh_compat.factory import OpenSSHFactory
from twisted.conch.unix import (
SSHSessionForUnixConchUser,
UnixConchUser,
)
_have_conch = True
except ImportError:
SSHPublicKeyDatabase = UnixConchUser = object
_have_conch = False
if_conch = skipIf(not _have_conch, "twisted.conch must be useable.")
@if_conch
def generate_ssh_key(key_file):
"""
Generate a ssh key.
:param FilePath key_file: Path to create ssh key at.
:return Key: The generated key.
"""
check_call(
[b"ssh-keygen",
# Specify the path where the generated key is written.
b"-f", key_file.path,
# Specify an empty passphrase.
b"-N", b"",
# Generate as little output as possible.
b"-q"])
return Key.fromFile(key_file.path)
class _InMemoryPublicKeyChecker(SSHPublicKeyDatabase):
"""
Check SSH public keys in-memory.
"""
def __init__(self, public_key):
"""
:param Key public_key: The public key we will accept.
"""
self._key = public_key
def checkKey(self, credentials):
"""
Validate some SSH key credentials.
Access is granted only to root since that is the user we expect
for connections from ZFS agents.
"""
return (self._key.blob() == credentials.blob and
credentials.username == b"root")
class _FixedHomeConchUser(UnixConchUser):
"""
An SSH user with a fixed, configurable home directory.
This is like a normal UNIX SSH user except the user's home directory is not
determined by the ``pwd`` database.
"""
def __init__(self, username, home):
"""
:param FilePath home: The path of the directory to use as this user's
home directory.
"""
UnixConchUser.__init__(self, username)
self._home = home
def getHomeDir(self):
"""
Give back the pre-determined home directory.
"""
return self._home.path
def getUserGroupId(self):
"""
Give back some not-strictly-legal ``None`` UID/GID
identifiers. This prevents the Conch server from trying to
switch IDs (which it can't do if it is not running as root).
"""
return None, None
@implementer(ISession)
class _EnvironmentSSHSessionForUnixConchUser(SSHSessionForUnixConchUser):
"""
SSH Session that correctly sets HOME.
Work-around for https://twistedmatrix.com/trac/ticket/7936.
"""
def execCommand(self, proto, cmd):
self.environ['HOME'] = self.avatar.getHomeDir()
return SSHSessionForUnixConchUser.execCommand(self, proto, cmd)
registerAdapter(
_EnvironmentSSHSessionForUnixConchUser, _FixedHomeConchUser, ISession)
@implementer(IRealm)
class _UnixSSHRealm(object):
"""
An ``IRealm`` for a Conch server which gives out ``_FixedHomeConchUser``
users.
"""
def __init__(self, home):
self.home = home
def requestAvatar(self, username, mind, *interfaces):
user = _FixedHomeConchUser(username, self.home)
return interfaces[0], user, user.logout
class _ConchServer(object):
"""
A helper for a test fixture to run an SSH server using Twisted Conch.
:ivar IPv4Address ip: The address the server is listening on.
:ivar int port: The port number the server is listening on.
:ivar _port: An object which provides ``IListeningPort`` and represents the
listening Conch server.
:ivar FilePath home_path: The path of the home directory of the user which
is allowed to authenticate against this server.
:ivar FilePath key_path: The path of an SSH private key which can be used
to authenticate against the server.
:ivar FilePath host_key_path: The path of the server's private host key.
"""
def __init__(self, base_path):
"""
:param FilePath base_path: The path beneath which all of the temporary
SSH server-related files will be created. An ``ssh`` directory
will be created as a child of this directory to hold the key pair
that is generated. An ``sshd`` directory will also be created here
to hold the generated host key. A ``home`` directory is also
created here and used as the home directory for shell logins to the
server.
"""
self.home = base_path.child(b"home")
self.home.makedirs()
ssh_path = base_path.child(b"ssh")
ssh_path.makedirs()
self.key_path = ssh_path.child(b"key")
key = generate_ssh_key(self.key_path)
sshd_path = base_path.child(b"sshd")
sshd_path.makedirs()
self.host_key_path = sshd_path.child(b"ssh_host_key")
generate_ssh_key(self.host_key_path)
factory = OpenSSHFactory()
realm = _UnixSSHRealm(self.home)
checker = _InMemoryPublicKeyChecker(public_key=key.public())
factory.portal = Portal(realm, [checker])
factory.dataRoot = sshd_path.path
factory.moduliRoot = b"/etc/ssh"
self._port = reactor.listenTCP(0, factory, interface=b"127.0.0.1")
self.ip = IPAddress(self._port.getHost().host)
self.port = self._port.getHost().port
def restore(self):
"""
Shut down the SSH server.
:return: A ``Deferred`` that fires when this has been done.
"""
return self._port.stopListening()
@if_conch
def create_ssh_server(base_path):
"""
:py:func:`create_ssh_server` is a fixture which creates and runs a new SSH
server and stops it later. Use the :py:meth:`restore` method of the
returned object to stop the server.
:param FilePath base_path: The path to a directory in which key material
will be generated.
"""
return _ConchServer(base_path)
class _SSHAgent(object):
"""
A helper for a test fixture to run an `ssh-agent` process.
"""
def __init__(self, key_file):
"""
Start an `ssh-agent` and add its socket path and pid to the global
environment so that SSH sub-processes can use it for authentication.
:param FilePath key_file: An SSH private key file which can be used
when authenticating with SSH servers.
"""
self._cleanups = []
output = check_output([b"ssh-agent", b"-c"]).splitlines()
# setenv SSH_AUTH_SOCK /tmp/ssh-5EfGti8RPQbQ/agent.6390;
# setenv SSH_AGENT_PID 6391;
# echo Agent pid 6391;
sock = output[0].split()[2][:-1]
pid = output[1].split()[2][:-1]
self._pid = int(pid)
def patchdict(k, v):
if k in os.environ:
self._cleanups.append(
lambda old=os.environ[k]: setitem(os.environ, k, old))
else:
self._cleanups.append(lambda: delitem(os.environ, k))
os.environ[k] = v
patchdict(b"SSH_AUTH_SOCK", sock)
patchdict(b"SSH_AGENT_PID", pid)
with open(os.devnull, "w") as discard:
# See https://clusterhq.atlassian.net/browse/FLOC-192
check_call(
[b"ssh-add", key_file.path],
stdout=discard, stderr=discard)
def restore(self):
"""
Shut down the SSH agent and restore the test environment to its
previous state.
"""
for cleanup in self._cleanups:
cleanup()
os.kill(self._pid, SIGKILL)
def create_ssh_agent(key_file, testcase=None):
"""
:py:func:`create_ssh_agent` is a fixture which creates and runs a new SSH
agent and stops it later. Use the :py:meth:`restore` method of the
returned object to stop the server.
:param FilePath key_file: The path of an SSH private key which can be
used when authenticating with SSH servers.
:param TestCase testcase: The ``TestCase`` object requiring the SSH
agent. Optional, adds a cleanup if supplied.
:rtype: _SSHAgent
"""
agent = _SSHAgent(key_file)
if testcase:
testcase.addCleanup(agent.restore)
return agent
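# A minimal usage sketch (the test below is hypothetical and not part of this
# module); it wires both fixtures together from a temporary directory:
#
#     from twisted.python.filepath import FilePath
#
#     def setUp(self):
#         base_path = FilePath(self.mktemp())
#         base_path.makedirs()
#         self.sshd = create_ssh_server(base_path)
#         self.addCleanup(self.sshd.restore)
#         create_ssh_agent(self.sshd.key_path, testcase=self)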
|
py | b4065a08caaf1fa56a4107a8565bf7d9541dc2d3 | from flask import Blueprint
from .auth import auth
from ..models.annotations import LMDB
URL_FETCH_MAX_LENGTH = 1024 * 1024 * 30
URL_FETCH_TIMEOUT = 10
DOWNLOAD_USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
'Chrome/51.0.2704.103 Safari/537.36 Lifelike'
bp = Blueprint('files', __name__, url_prefix='/files')
# TODO: LL-415 Migrate the code to the projects folder once GUI is complete and API refactored
# is this used anymore?
@bp.route('/lmdbs_dates', methods=['GET'])
@auth.login_required
def get_lmdbs_dates():
rows = LMDB.query.all()
return {row.name: row.modified_date for row in rows}
|
py | b4065a2178a5c8b732c455a25d36004f4aead147 | """ Testing https://developsense.com/calendar/calendar.html """
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_developsense_calendar(self):
actual = [["*" for end in range(0, 48)] for start in range(0, 48)]
expected = [["*" for end in range(0, 48)] for start in range(0, 48)]
for start in range(1, 49):
for end in range(1, 49):
if start < end:
expected[start-1][end-1] = "1" # Expected Success
else:
expected[start-1][end-1] = "0" # Expected Error
self.open("https://developsense.com/calendar/calendar.html")
self.type("#username", "SeleniumBase")
print("\nPop-up status was incorrect for the following ranges:")
timecode = {}
for i in range(1, 49):
timecode[i] = self.get_text('option[value="%s"]' % i)
for start in range(1, 49):
self.select_option_by_value("#starttime", str(start))
for end in range(1, 49):
self.select_option_by_value("#endtime", str(end))
self.find_element("button#execute").click()
text = self.switch_to_alert().text
if "SUCCESS" in text:
actual[start-1][end-1] = "1" # Actual Success
else:
actual[start-1][end-1] = "0" # Actual Error
# Check if actual result matches expected result
if actual[start-1][end-1] != expected[start-1][end-1]:
start_time = timecode[start]
end_time = timecode[end]
print("%s - %s" % (start_time, end_time))
|
py | b4065a3d98eb895bf3f9f2dd9fda7920ff344556 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import gallery.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('order', models.PositiveIntegerField(db_index=True, editable=False)),
('image', models.ImageField(upload_to=gallery.models.Photo.image_upload_to)),
],
options={
'ordering': ('order',),
'abstract': False,
},
bases=(models.Model,),
),
]
|
py | b4065afb45c5f58336bd6b293ec07e55b110b749 | # flake8: noqa: F501
from dataclasses import dataclass
from typing import List, Any
from unittest import TestCase
from chaingreen.full_node.bundle_tools import (
bundle_suitable_for_compression,
compressed_coin_solution_entry_list,
compressed_spend_bundle_solution,
match_standard_transaction_at_any_index,
simple_solution_generator,
spend_bundle_to_serialized_coin_solution_entry_list,
)
from chaingreen.full_node.generator import run_generator, create_generator_args
from chaingreen.types.blockchain_format.program import Program, SerializedProgram, INFINITE_COST
from chaingreen.types.generator_types import BlockGenerator, CompressorArg, GeneratorArg
from chaingreen.types.spend_bundle import SpendBundle
from chaingreen.util.byte_types import hexstr_to_bytes
from chaingreen.util.ints import uint32
from chaingreen.wallet.puzzles.load_clvm import load_clvm
from tests.core.make_block_generator import make_spend_bundle
from clvm import SExp
import io
from clvm.serialize import sexp_from_stream
from clvm_tools import binutils
TEST_GEN_DESERIALIZE = load_clvm("test_generator_deserialize.clvm", package_or_requirement="chaingreen.wallet.puzzles")
DESERIALIZE_MOD = load_clvm("chialisp_deserialisation.clvm", package_or_requirement="chaingreen.wallet.puzzles")
DECOMPRESS_PUZZLE = load_clvm("decompress_puzzle.clvm", package_or_requirement="chaingreen.wallet.puzzles")
DECOMPRESS_CSE = load_clvm("decompress_coin_solution_entry.clvm", package_or_requirement="chaingreen.wallet.puzzles")
DECOMPRESS_CSE_WITH_PREFIX = load_clvm(
"decompress_coin_solution_entry_with_prefix.clvm", package_or_requirement="chaingreen.wallet.puzzles"
)
DECOMPRESS_BLOCK = load_clvm("block_program_zero.clvm", package_or_requirement="chaingreen.wallet.puzzles")
TEST_MULTIPLE = load_clvm(
"test_multiple_generator_input_arguments.clvm", package_or_requirement="chaingreen.wallet.puzzles"
)
Nil = Program.from_bytes(b"\x80")
original_generator = hexstr_to_bytes(
"ff01ffffffa00000000000000000000000000000000000000000000000000000000000000000ff830186a080ffffff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3ff018080ffff80ffff01ffff33ffa06b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9ff830186a08080ff8080808080"
) # noqa
gen1 = b"aaaaaaaaaa" + original_generator
gen2 = b"bb" + original_generator
FAKE_BLOCK_HEIGHT1 = uint32(100)
FAKE_BLOCK_HEIGHT2 = uint32(200)
@dataclass(frozen=True)
class MultipleCompressorArg:
arg: List[CompressorArg]
split_offset: int
def create_multiple_ref_generator(args: MultipleCompressorArg, spend_bundle: SpendBundle) -> BlockGenerator:
"""
Decompress a transaction by referencing bytes from multiple input generator references
"""
compressed_cse_list = compressed_coin_solution_entry_list(spend_bundle)
program = TEST_MULTIPLE.curry(
DECOMPRESS_PUZZLE,
DECOMPRESS_CSE_WITH_PREFIX,
args.arg[0].start,
args.arg[0].end - args.split_offset,
args.arg[1].end - args.split_offset,
args.arg[1].end,
compressed_cse_list,
)
# TODO aqk: Improve ergonomics of CompressorArg -> GeneratorArg conversion
generator_args = [
GeneratorArg(FAKE_BLOCK_HEIGHT1, args.arg[0].generator),
GeneratorArg(FAKE_BLOCK_HEIGHT2, args.arg[1].generator),
]
return BlockGenerator(program, generator_args)
def spend_bundle_to_coin_solution_entry_list(bundle: SpendBundle) -> List[Any]:
r = []
for coin_solution in bundle.coin_solutions:
entry = [
coin_solution.coin.parent_coin_info,
sexp_from_stream(io.BytesIO(bytes(coin_solution.puzzle_reveal)), SExp.to),
coin_solution.coin.amount,
sexp_from_stream(io.BytesIO(bytes(coin_solution.solution)), SExp.to),
]
r.append(entry)
return r
class TestCompression(TestCase):
def test_spend_bundle_suitable(self):
sb: SpendBundle = make_spend_bundle(1)
assert bundle_suitable_for_compression(sb)
def test_compress_spend_bundle(self):
pass
def test_multiple_input_gen_refs(self):
start1, end1 = match_standard_transaction_at_any_index(gen1)
start2, end2 = match_standard_transaction_at_any_index(gen2)
ca1 = CompressorArg(FAKE_BLOCK_HEIGHT1, SerializedProgram.from_bytes(gen1), start1, end1)
ca2 = CompressorArg(FAKE_BLOCK_HEIGHT2, SerializedProgram.from_bytes(gen2), start2, end2)
prefix_len1 = end1 - start1
prefix_len2 = end2 - start2
assert prefix_len1 == prefix_len2
prefix_len = prefix_len1
results = []
for split_offset in range(prefix_len):
gen_args = MultipleCompressorArg([ca1, ca2], split_offset)
spend_bundle: SpendBundle = make_spend_bundle(1)
multi_gen = create_multiple_ref_generator(gen_args, spend_bundle)
cost, result = run_generator(multi_gen, INFINITE_COST)
results.append(result)
assert result is not None
assert cost > 0
assert all(r == results[0] for r in results)
def test_compressed_block_results(self):
sb: SpendBundle = make_spend_bundle(1)
start, end = match_standard_transaction_at_any_index(original_generator)
ca = CompressorArg(uint32(0), SerializedProgram.from_bytes(original_generator), start, end)
c = compressed_spend_bundle_solution(ca, sb)
s = simple_solution_generator(sb)
assert c != s
cost_c, result_c = run_generator(c, INFINITE_COST)
cost_s, result_s = run_generator(s, INFINITE_COST)
print(result_c)
assert result_c is not None
assert result_s is not None
assert result_c == result_s
def test_spend_byndle_coin_solution(self):
for i in range(0, 10):
sb: SpendBundle = make_spend_bundle(i)
cs1 = SExp.to(spend_bundle_to_coin_solution_entry_list(sb)).as_bin()
cs2 = spend_bundle_to_serialized_coin_solution_entry_list(sb)
assert cs1 == cs2
class TestDecompression(TestCase):
def __init__(self, *args, **kwargs):
super(TestDecompression, self).__init__(*args, **kwargs)
self.maxDiff = None
def test_deserialization(self):
self.maxDiff = None
cost, out = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [bytes(Program.to("hello"))])
assert out == Program.to("hello")
def test_deserialization_as_argument(self):
self.maxDiff = None
cost, out = TEST_GEN_DESERIALIZE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, Nil, bytes(Program.to("hello"))]
)
print(bytes(Program.to("hello")))
print()
print(out)
assert out == Program.to("hello")
def test_decompress_puzzle(self):
cost, out = DECOMPRESS_PUZZLE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, b"\xff", bytes(Program.to("pubkey")), b"\x80"]
)
print()
print(out)
# An empty CSE is invalid. (An empty CSE list may be okay)
# def test_decompress_empty_cse(self):
# cse0 = binutils.assemble("()")
# cost, out = DECOMPRESS_CSE.run_with_cost(INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, b"\xff", b"\x80", cse0])
# print()
# print(out)
def test_decompress_cse(self):
"""Decompress a single CSE / CoinSolutionEntry"""
cse0 = binutils.assemble(
"((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
) # noqa
cost, out = DECOMPRESS_CSE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, b"\xff", b"\x80", cse0]
)
print()
print(out)
def test_decompress_cse_with_prefix(self):
cse0 = binutils.assemble(
"((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
) # noqa
start = 2 + 44
end = start + 238
prefix = original_generator[start:end]
# (deserialize decompress_puzzle puzzle_prefix cse)
cost, out = DECOMPRESS_CSE_WITH_PREFIX.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, prefix, cse0]
)
print()
print(out)
def test_block_program_zero(self):
"Decompress a list of CSEs"
self.maxDiff = None
cse1 = binutils.assemble(
"(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
) # noqa
cse2 = binutils.assemble(
"""
(
((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
(0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3
(() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))
)
((0x0000000000000000000000000000000000000000000000000000000000000001 0x0186a0)
(0xb0a6207f5173ec41491d9f2c1b8fff5579e13703077e0eaca8fe587669dcccf51e9209a6b65576845ece5f7c2f3229e7e3
(() (q (51 0x24254a3efc3ebfac9979bbe0d615e2eda043aa329905f65b63846fa24149e2b6 0x0186a0)) ())))
)
"""
) # noqa
start = 2 + 44
end = start + 238
# (mod (decompress_puzzle decompress_coin_solution_entry start end compressed_cses deserialize generator_list reserved_arg)
# cost, out = DECOMPRESS_BLOCK.run_with_cost(INFINITE_COST, [DECOMPRESS_PUZZLE, DECOMPRESS_CSE, start, Program.to(end), cse0, DESERIALIZE_MOD, bytes(original_generator)])
cost, out = DECOMPRESS_BLOCK.run_with_cost(
INFINITE_COST,
[
DECOMPRESS_PUZZLE,
DECOMPRESS_CSE_WITH_PREFIX,
start,
Program.to(end),
cse2,
DESERIALIZE_MOD,
[bytes(original_generator)],
],
)
print()
print(out)
def test_block_program_zero_with_curry(self):
self.maxDiff = None
cse1 = binutils.assemble(
"(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
) # noqa
cse2 = binutils.assemble(
"""
(
((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
(0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3
(() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))
)
((0x0000000000000000000000000000000000000000000000000000000000000001 0x0186a0)
(0xb0a6207f5173ec41491d9f2c1b8fff5579e13703077e0eaca8fe587669dcccf51e9209a6b65576845ece5f7c2f3229e7e3
(() (q (51 0x24254a3efc3ebfac9979bbe0d615e2eda043aa329905f65b63846fa24149e2b6 0x0186a0)) ())))
)
"""
) # noqa
start = 2 + 44
end = start + 238
# (mod (decompress_puzzle decompress_coin_solution_entry start end compressed_cses deserialize generator_list reserved_arg)
# cost, out = DECOMPRESS_BLOCK.run_with_cost(INFINITE_COST, [DECOMPRESS_PUZZLE, DECOMPRESS_CSE, start, Program.to(end), cse0, DESERIALIZE_MOD, bytes(original_generator)])
p = DECOMPRESS_BLOCK.curry(DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, start, Program.to(end))
cost, out = p.run_with_cost(INFINITE_COST, [cse2, DESERIALIZE_MOD, [bytes(original_generator)]])
print()
print(p)
print(out)
p_with_cses = DECOMPRESS_BLOCK.curry(
DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, start, Program.to(end), cse2, DESERIALIZE_MOD
)
generator_args = create_generator_args([SerializedProgram.from_bytes(original_generator)])
cost, out = p_with_cses.run_with_cost(INFINITE_COST, generator_args)
print()
print(p_with_cses)
print(out)
|
py | b4065b492bd36b554f91fc5ae767718a9a9f83bc | import asyncio
import functools
import sys
import traceback
import warnings
from collections import defaultdict
from hashlib import md5, sha1, sha256
from itertools import cycle, islice
from time import monotonic
from types import MappingProxyType
from . import hdrs, helpers
from .client_exceptions import (ClientConnectionError,
ClientConnectorCertificateError,
ClientConnectorError, ClientConnectorSSLError,
ClientHttpProxyError,
ClientProxyConnectionError,
ServerFingerprintMismatch, certificate_errors,
ssl_errors)
from .client_proto import ResponseHandler
from .client_reqrep import ClientRequest
from .helpers import SimpleCookie, is_ip_address, noop, sentinel
from .locks import EventResultOrError
from .log import client_logger
from .resolver import DefaultResolver
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
__all__ = ('BaseConnector', 'TCPConnector', 'UnixConnector')
HASHFUNC_BY_DIGESTLEN = {
16: md5,
20: sha1,
32: sha256,
}
class Connection:
_source_traceback = None
_transport = None
def __init__(self, connector, key, protocol, loop):
self._key = key
self._connector = connector
self._loop = loop
self._protocol = protocol
self._callbacks = []
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
def __repr__(self):
return 'Connection<{}>'.format(self._key)
def __del__(self, _warnings=warnings):
if self._protocol is not None:
_warnings.warn('Unclosed connection {!r}'.format(self),
ResourceWarning)
if self._loop.is_closed():
return
self._connector._release(
self._key, self._protocol, should_close=True)
context = {'client_connection': self,
'message': 'Unclosed connection'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
@property
def loop(self):
return self._loop
@property
def transport(self):
return self._protocol.transport
@property
def protocol(self):
return self._protocol
@property
def writer(self):
return self._protocol.writer
def add_callback(self, callback):
if callback is not None:
self._callbacks.append(callback)
def _notify_release(self):
callbacks, self._callbacks = self._callbacks[:], []
for cb in callbacks:
try:
cb()
except:
pass
def close(self):
self._notify_release()
if self._protocol is not None:
self._connector._release(
self._key, self._protocol, should_close=True)
self._protocol = None
def release(self):
self._notify_release()
if self._protocol is not None:
self._connector._release(
self._key, self._protocol,
should_close=self._protocol.should_close)
self._protocol = None
def detach(self):
self._notify_release()
if self._protocol is not None:
self._connector._release_acquired(self._protocol)
self._protocol = None
@property
def closed(self):
return self._protocol is None or not self._protocol.is_connected()
class _TransportPlaceholder:
""" placeholder for BaseConnector.connect function """
def close(self):
pass
class BaseConnector(object):
"""Base connector class.
keepalive_timeout - (optional) Keep-alive timeout.
force_close - Set to True to force close and do reconnect
after each request (and between redirects).
limit - The total number of simultaneous connections.
limit_per_host - Number of simultaneous connections to one host.
disable_cleanup_closed - Disable clean-up closed ssl transports.
loop - Optional event loop.
"""
_closed = True # prevent AttributeError in __del__ if ctor was failed
_source_traceback = None
# abort transport after 2 seconds (cleanup broken connections)
_cleanup_closed_period = 2.0
def __init__(self, *, keepalive_timeout=sentinel,
force_close=False, limit=100, limit_per_host=0,
enable_cleanup_closed=False, loop=None):
if force_close:
if keepalive_timeout is not None and \
keepalive_timeout is not sentinel:
raise ValueError('keepalive_timeout cannot '
'be set if force_close is True')
else:
if keepalive_timeout is sentinel:
keepalive_timeout = 15.0
if loop is None:
loop = asyncio.get_event_loop()
self._closed = False
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
self._conns = {}
self._limit = limit
self._limit_per_host = limit_per_host
self._acquired = set()
self._acquired_per_host = defaultdict(set)
self._keepalive_timeout = keepalive_timeout
self._force_close = force_close
self._waiters = defaultdict(list)
self._loop = loop
self._factory = functools.partial(ResponseHandler, loop=loop)
self.cookies = SimpleCookie()
# start keep-alive connection cleanup task
self._cleanup_handle = None
# start cleanup closed transports task
self._cleanup_closed_handle = None
self._cleanup_closed_disabled = not enable_cleanup_closed
self._cleanup_closed_transports = []
self._cleanup_closed()
def __del__(self, _warnings=warnings):
if self._closed:
return
if not self._conns:
return
conns = [repr(c) for c in self._conns.values()]
self.close()
_warnings.warn("Unclosed connector {!r}".format(self),
ResourceWarning)
context = {'connector': self,
'connections': conns,
'message': 'Unclosed connector'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
@property
def force_close(self):
"""Ultimately close connection on releasing if True."""
return self._force_close
@property
def limit(self):
"""The total number for simultaneous connections.
If limit is 0 the connector has no limit.
The default limit size is 100.
"""
return self._limit
@property
def limit_per_host(self):
"""The limit_per_host for simultaneous connections
to the same endpoint.
Endpoints are the same if they are have equal
(host, port, is_ssl) triple.
"""
return self._limit_per_host
def _cleanup(self):
"""Cleanup unused transports."""
if self._cleanup_handle:
self._cleanup_handle.cancel()
now = self._loop.time()
timeout = self._keepalive_timeout
if self._conns:
connections = {}
deadline = now - timeout
for key, conns in self._conns.items():
alive = []
for proto, use_time in conns:
if proto.is_connected():
if use_time - deadline < 0:
transport = proto.close()
if (key[-1] and not self._cleanup_closed_disabled):
self._cleanup_closed_transports.append(
transport)
else:
alive.append((proto, use_time))
if alive:
connections[key] = alive
self._conns = connections
if self._conns:
self._cleanup_handle = helpers.weakref_handle(
self, '_cleanup', timeout, self._loop)
def _cleanup_closed(self):
"""Double confirmation for transport close.
        Some broken ssl servers may leave the socket open without a proper close.
"""
if self._cleanup_closed_handle:
self._cleanup_closed_handle.cancel()
for transport in self._cleanup_closed_transports:
if transport is not None:
transport.abort()
self._cleanup_closed_transports = []
if not self._cleanup_closed_disabled:
self._cleanup_closed_handle = helpers.weakref_handle(
self, '_cleanup_closed',
self._cleanup_closed_period, self._loop)
def close(self):
"""Close all opened transports."""
if self._closed:
return
self._closed = True
try:
if self._loop.is_closed():
return noop()
# cancel cleanup task
if self._cleanup_handle:
self._cleanup_handle.cancel()
# cancel cleanup close task
if self._cleanup_closed_handle:
self._cleanup_closed_handle.cancel()
for data in self._conns.values():
for proto, t0 in data:
proto.close()
for proto in self._acquired:
proto.close()
for transport in self._cleanup_closed_transports:
if transport is not None:
transport.abort()
finally:
self._conns.clear()
self._acquired.clear()
self._waiters.clear()
self._cleanup_handle = None
self._cleanup_closed_transports.clear()
self._cleanup_closed_handle = None
@property
def closed(self):
"""Is connector closed.
A readonly property.
"""
return self._closed
@asyncio.coroutine
def connect(self, req):
"""Get from pool or create new connection."""
key = req.connection_key
if self._limit:
# total calc available connections
available = self._limit - len(self._waiters) - len(self._acquired)
# check limit per host
if (self._limit_per_host and available > 0 and
key in self._acquired_per_host):
available = self._limit_per_host - len(
self._acquired_per_host.get(key))
elif self._limit_per_host and key in self._acquired_per_host:
# check limit per host
available = self._limit_per_host - len(
self._acquired_per_host.get(key))
else:
available = 1
# Wait if there are no available connections.
if available <= 0:
fut = helpers.create_future(self._loop)
# This connection will now count towards the limit.
waiters = self._waiters[key]
waiters.append(fut)
try:
yield from fut
finally:
# remove a waiter even if it was cancelled
waiters.remove(fut)
if not waiters:
del self._waiters[key]
proto = self._get(key)
if proto is None:
placeholder = _TransportPlaceholder()
self._acquired.add(placeholder)
self._acquired_per_host[key].add(placeholder)
try:
proto = yield from self._create_connection(req)
if self._closed:
proto.close()
raise ClientConnectionError("Connector is closed.")
except:
# signal to waiter
for waiter in self._waiters[key]:
if not waiter.done():
waiter.set_result(None)
break
raise
finally:
if not self._closed:
self._acquired.remove(placeholder)
self._acquired_per_host[key].remove(placeholder)
self._acquired.add(proto)
self._acquired_per_host[key].add(proto)
return Connection(self, key, proto, self._loop)
def _get(self, key):
try:
conns = self._conns[key]
except KeyError:
return None
t1 = self._loop.time()
while conns:
proto, t0 = conns.pop()
if proto.is_connected():
if t1 - t0 > self._keepalive_timeout:
transport = proto.close()
# only for SSL transports
if key[-1] and not self._cleanup_closed_disabled:
self._cleanup_closed_transports.append(transport)
else:
if not conns:
# The very last connection was reclaimed: drop the key
del self._conns[key]
return proto
# No more connections: drop the key
del self._conns[key]
return None
def _release_waiter(self):
# always release only one waiter
if self._limit:
# if we have limit and we have available
if self._limit - len(self._acquired) > 0:
for key, waiters in self._waiters.items():
if waiters:
if not waiters[0].done():
waiters[0].set_result(None)
break
elif self._limit_per_host:
            # if we don't have a total limit but do have a limit per host,
            # then release the first available waiter
for key, waiters in self._waiters.items():
if waiters:
if not waiters[0].done():
waiters[0].set_result(None)
break
def _release_acquired(self, key, proto):
if self._closed:
# acquired connection is already released on connector closing
return
try:
self._acquired.remove(proto)
self._acquired_per_host[key].remove(proto)
if not self._acquired_per_host[key]:
del self._acquired_per_host[key]
except KeyError: # pragma: no cover
            # this may be a result of the non-deterministic order of object
            # finalization due to garbage collection.
pass
else:
self._release_waiter()
def _release(self, key, protocol, *, should_close=False):
if self._closed:
# acquired connection is already released on connector closing
return
self._release_acquired(key, protocol)
if self._force_close:
should_close = True
if should_close or protocol.should_close:
transport = protocol.close()
if key[-1] and not self._cleanup_closed_disabled:
self._cleanup_closed_transports.append(transport)
else:
conns = self._conns.get(key)
if conns is None:
conns = self._conns[key] = []
conns.append((protocol, self._loop.time()))
if self._cleanup_handle is None:
self._cleanup_handle = helpers.weakref_handle(
self, '_cleanup', self._keepalive_timeout, self._loop)
@asyncio.coroutine
def _create_connection(self, req):
raise NotImplementedError()
_SSL_OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
class _DNSCacheTable:
def __init__(self, ttl=None):
self._addrs = {}
self._addrs_rr = {}
self._timestamps = {}
self._ttl = ttl
def __contains__(self, host):
return host in self._addrs
@property
def addrs(self):
return self._addrs
def add(self, host, addrs):
self._addrs[host] = addrs
self._addrs_rr[host] = cycle(addrs)
if self._ttl:
self._timestamps[host] = monotonic()
def remove(self, host):
self._addrs.pop(host, None)
self._addrs_rr.pop(host, None)
if self._ttl:
self._timestamps.pop(host, None)
def clear(self):
self._addrs.clear()
self._addrs_rr.clear()
self._timestamps.clear()
def next_addrs(self, host):
        # Return an iterator that yields at most as many addrs as there
        # are for the specific host, starting from the first addr that
        # has not been iterated yet.
return islice(self._addrs_rr[host], len(self._addrs[host]))
def expired(self, host):
if self._ttl is None:
return False
return (
self._timestamps[host] + self._ttl
) < monotonic()
class TCPConnector(BaseConnector):
"""TCP connector.
verify_ssl - Set to True to check ssl certifications.
fingerprint - Pass the binary md5, sha1, or sha256
digest of the expected certificate in DER format to verify
that the certificate the server presents matches. See also
https://en.wikipedia.org/wiki/Transport_Layer_Security#Certificate_pinning
resolve - (Deprecated) Set to True to do DNS lookup for
host name.
resolver - Enable DNS lookups and use this
resolver
use_dns_cache - Use memory cache for DNS lookups.
ttl_dns_cache - Max seconds to cache a DNS entry, None means cache forever.
family - socket address family
local_addr - local tuple of (host, port) to bind socket to
keepalive_timeout - (optional) Keep-alive timeout.
force_close - Set to True to force close and do reconnect
after each request (and between redirects).
limit - The total number of simultaneous connections.
limit_per_host - Number of simultaneous connections to one host.
loop - Optional event loop.
"""
def __init__(self, *, verify_ssl=True, fingerprint=None,
resolve=sentinel, use_dns_cache=True, ttl_dns_cache=10,
family=0, ssl_context=None, local_addr=None,
resolver=None, keepalive_timeout=sentinel,
force_close=False, limit=100, limit_per_host=0,
enable_cleanup_closed=False, loop=None):
super().__init__(keepalive_timeout=keepalive_timeout,
force_close=force_close,
limit=limit, limit_per_host=limit_per_host,
enable_cleanup_closed=enable_cleanup_closed,
loop=loop)
if not verify_ssl and ssl_context is not None:
raise ValueError(
"Either disable ssl certificate validation by "
"verify_ssl=False or specify ssl_context, not both.")
self._verify_ssl = verify_ssl
if fingerprint:
digestlen = len(fingerprint)
hashfunc = HASHFUNC_BY_DIGESTLEN.get(digestlen)
if not hashfunc:
raise ValueError('fingerprint has invalid length')
elif hashfunc is md5 or hashfunc is sha1:
warnings.warn('md5 and sha1 are insecure and deprecated. '
'Use sha256.',
DeprecationWarning, stacklevel=2)
client_logger.warn('md5 and sha1 are insecure and deprecated. '
'Use sha256.')
self._hashfunc = hashfunc
self._fingerprint = fingerprint
if resolver is None:
resolver = DefaultResolver(loop=self._loop)
self._resolver = resolver
self._use_dns_cache = use_dns_cache
self._cached_hosts = _DNSCacheTable(ttl=ttl_dns_cache)
self._throttle_dns_events = {}
self._ssl_context = ssl_context
self._family = family
self._local_addr = local_addr
def close(self):
"""Close all ongoing DNS calls."""
for ev in self._throttle_dns_events.values():
ev.cancel()
super().close()
@property
def verify_ssl(self):
"""Do check for ssl certifications?"""
return self._verify_ssl
@property
def fingerprint(self):
"""Expected ssl certificate fingerprint."""
return self._fingerprint
@property
def ssl_context(self):
"""SSLContext instance for https requests.
Lazy property, creates context on demand.
"""
if ssl is None: # pragma: no cover
raise RuntimeError('SSL is not supported.')
if self._ssl_context is None:
if not self._verify_ssl:
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.options |= ssl.OP_NO_SSLv3
sslcontext.options |= _SSL_OP_NO_COMPRESSION
sslcontext.set_default_verify_paths()
else:
sslcontext = ssl.create_default_context()
self._ssl_context = sslcontext
return self._ssl_context
@property
def family(self):
"""Socket family like AF_INET."""
return self._family
@property
def use_dns_cache(self):
"""True if local DNS caching is enabled."""
return self._use_dns_cache
@property
def cached_hosts(self):
"""Read-only dict of cached DNS record."""
return MappingProxyType(self._cached_hosts.addrs)
def clear_dns_cache(self, host=None, port=None):
"""Remove specified host/port or clear all dns local cache."""
if host is not None and port is not None:
self._cached_hosts.remove((host, port))
elif host is not None or port is not None:
raise ValueError("either both host and port "
"or none of them are allowed")
else:
self._cached_hosts.clear()
@asyncio.coroutine
def _resolve_host(self, host, port):
if is_ip_address(host):
return [{'hostname': host, 'host': host, 'port': port,
'family': self._family, 'proto': 0, 'flags': 0}]
if not self._use_dns_cache:
return (yield from self._resolver.resolve(
host, port, family=self._family))
key = (host, port)
if (key in self._cached_hosts) and\
(not self._cached_hosts.expired(key)):
return self._cached_hosts.next_addrs(key)
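# Coalesce concurrent lookups for the same (host, port): the first caller
# creates the throttle event and performs the resolution, while later callers
# wait on that event and then reuse the freshly cached addresses (or re-raise
# the exception that was set on the event).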
if key in self._throttle_dns_events:
yield from self._throttle_dns_events[key].wait()
else:
self._throttle_dns_events[key] = \
EventResultOrError(self._loop)
try:
addrs = yield from \
asyncio.shield(self._resolver.resolve(host,
port,
family=self._family),
loop=self._loop)
self._cached_hosts.add(key, addrs)
self._throttle_dns_events[key].set()
except Exception as e:
# any DNS exception, regardless of the resolver implementation,
# is set on the event so that all waiters raise the same exception.
self._throttle_dns_events[key].set(exc=e)
raise
finally:
self._throttle_dns_events.pop(key)
return self._cached_hosts.next_addrs(key)
@asyncio.coroutine
def _create_connection(self, req):
"""Create connection.
Has same keyword arguments as BaseEventLoop.create_connection.
"""
if req.proxy:
_, proto = yield from self._create_proxy_connection(req)
else:
_, proto = yield from self._create_direct_connection(req)
return proto
def _get_ssl_context(self, req):
"""Logic to get the correct SSL context
0. if req.ssl is false, return None
1. if ssl_context is specified in req, use it
2. if _ssl_context is specified in self, use it
3. otherwise:
1. if verify_ssl is not specified in req, use self.ssl_context
(will generate a default context according to self.verify_ssl)
2. if verify_ssl is True in req, generate a default SSL context
3. if verify_ssl is False in req, generate a SSL context that
won't verify
"""
if req.ssl:
sslcontext = req.ssl_context or self._ssl_context
if not sslcontext:
if req.verify_ssl is None:
sslcontext = self.ssl_context
elif req.verify_ssl:
sslcontext = ssl.create_default_context()
else:
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.options |= ssl.OP_NO_SSLv3
sslcontext.options |= _SSL_OP_NO_COMPRESSION
sslcontext.set_default_verify_paths()
else:
sslcontext = None
return sslcontext
def _get_fingerprint_and_hashfunc(self, req):
if req.fingerprint:
return (req.fingerprint, req._hashfunc)
elif self.fingerprint:
return (self.fingerprint, self._hashfunc)
else:
return (None, None)
@asyncio.coroutine
def _create_direct_connection(self, req):
sslcontext = self._get_ssl_context(req)
fingerprint, hashfunc = self._get_fingerprint_and_hashfunc(req)
hosts = yield from self._resolve_host(req.url.raw_host, req.port)
for hinfo in hosts:
try:
host = hinfo['host']
port = hinfo['port']
transp, proto = yield from self._loop.create_connection(
self._factory, host, port,
ssl=sslcontext, family=hinfo['family'],
proto=hinfo['proto'], flags=hinfo['flags'],
server_hostname=hinfo['hostname'] if sslcontext else None,
local_addr=self._local_addr)
has_cert = transp.get_extra_info('sslcontext')
if has_cert and fingerprint:
sock = transp.get_extra_info('socket')
if not hasattr(sock, 'getpeercert'):
# Workaround for asyncio 3.5.0
# Starting from 3.5.1 version
# there is 'ssl_object' extra info in transport
sock = transp._ssl_protocol._sslpipe.ssl_object
# gives DER-encoded cert as a sequence of bytes (or None)
cert = sock.getpeercert(binary_form=True)
assert cert
got = hashfunc(cert).digest()
expected = fingerprint
if got != expected:
transp.close()
if not self._cleanup_closed_disabled:
self._cleanup_closed_transports.append(transp)
raise ServerFingerprintMismatch(
expected, got, host, port)
return transp, proto
except certificate_errors as exc:
raise ClientConnectorCertificateError(
req.connection_key, exc) from exc
except ssl_errors as exc:
raise ClientConnectorSSLError(req.connection_key, exc) from exc
except OSError as exc:
raise ClientConnectorError(req.connection_key, exc) from exc
@asyncio.coroutine
def _create_proxy_connection(self, req):
headers = {}
if req.proxy_headers is not None:
headers = req.proxy_headers
headers[hdrs.HOST] = req.headers[hdrs.HOST]
proxy_req = ClientRequest(
hdrs.METH_GET, req.proxy,
headers=headers,
auth=req.proxy_auth,
loop=self._loop,
verify_ssl=req.verify_ssl,
fingerprint=req.fingerprint,
ssl_context=req.ssl_context)
try:
# create connection to proxy server
transport, proto = yield from self._create_direct_connection(
proxy_req)
except OSError as exc:
raise ClientProxyConnectionError(proxy_req, exc) from exc
auth = proxy_req.headers.pop(hdrs.AUTHORIZATION, None)
if auth is not None:
if not req.ssl:
req.headers[hdrs.PROXY_AUTHORIZATION] = auth
else:
proxy_req.headers[hdrs.PROXY_AUTHORIZATION] = auth
if req.ssl:
sslcontext = self._get_ssl_context(req)
# For HTTPS requests over HTTP proxy
# we must notify proxy to tunnel connection
# so we send CONNECT command:
# CONNECT www.python.org:443 HTTP/1.1
# Host: www.python.org
#
# next we must do TLS handshake and so on
# to do this we must wrap raw socket into secure one
# asyncio handles this perfectly
proxy_req.method = hdrs.METH_CONNECT
proxy_req.url = req.url
key = (req.host, req.port, req.ssl)
conn = Connection(self, key, proto, self._loop)
proxy_resp = proxy_req.send(conn)
try:
resp = yield from proxy_resp.start(conn, True)
except:
proxy_resp.close()
conn.close()
raise
else:
conn._protocol = None
conn._transport = None
try:
if resp.status != 200:
raise ClientHttpProxyError(
proxy_resp.request_info,
resp.history,
code=resp.status,
message=resp.reason,
headers=resp.headers)
rawsock = transport.get_extra_info('socket', default=None)
if rawsock is None:
raise RuntimeError(
"Transport does not expose socket instance")
# Duplicate the socket, so now we can close proxy transport
rawsock = rawsock.dup()
finally:
transport.close()
transport, proto = yield from self._loop.create_connection(
self._factory, ssl=sslcontext, sock=rawsock,
server_hostname=req.host)
finally:
proxy_resp.close()
return transport, proto
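# Illustrative sketch (assumes a running event loop and that `digest` holds
# the expected sha256 digest of the server's DER-encoded certificate; the
# values are placeholders, this is not part of aiohttp itself): certificate
# pinning via the `fingerprint` option documented above.
def _fingerprint_pinning_example(loop, digest):  # pragma: no cover
    # A request made through this connector fails with
    # ServerFingerprintMismatch if the presented certificate's digest
    # differs from `digest`.
    return TCPConnector(fingerprint=digest, loop=loop)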
class UnixConnector(BaseConnector):
"""Unix socket connector.
path - Unix socket path.
keepalive_timeout - (optional) Keep-alive timeout.
force_close - Set to True to force close and do reconnect
after each request (and between redirects).
limit - The total number of simultaneous connections.
limit_per_host - Number of simultaneous connections to one host.
loop - Optional event loop.
Usage:
>>> conn = UnixConnector(path='/path/to/socket')
>>> session = ClientSession(connector=conn)
>>> resp = yield from session.get('http://python.org')
"""
def __init__(self, path, force_close=False, keepalive_timeout=sentinel,
limit=100, limit_per_host=0, loop=None):
super().__init__(force_close=force_close,
keepalive_timeout=keepalive_timeout,
limit=limit, limit_per_host=limit_per_host, loop=loop)
self._path = path
@property
def path(self):
"""Path to unix socket."""
return self._path
@asyncio.coroutine
def _create_connection(self, req):
_, proto = yield from self._loop.create_unix_connection(
self._factory, self._path)
return proto
|
py | b4065b4d45df85d725ea671f240eba852edfa917 | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.test_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_datasets.testing import test_case
from tensorflow_datasets.testing import test_utils
tf.compat.v1.enable_eager_execution()
class RunInGraphAndEagerTest(test_case.TestCase):
def test_run_in_graph_and_eager_modes(self):
l = []
def inc(self, with_brackets):
del self # self argument is required by run_in_graph_and_eager_modes.
mode = "eager" if tf.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
l.append((with_brackets, mode))
f = test_utils.run_in_graph_and_eager_modes(inc)
f(self, with_brackets=False)
f = test_utils.run_in_graph_and_eager_modes()(inc)
f(self, with_brackets=True)
self.assertEqual(len(l), 4)
self.assertEqual(
set(l),
{
("with_brackets", "graph"),
("with_brackets", "eager"),
("without_brackets", "graph"),
("without_brackets", "eager"),
},
)
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
modes = []
mode_name = lambda: "eager" if tf.executing_eagerly() else "graph"
class ExampleTest(test_case.TestCase):
def runTest(self):
pass
def setUp(self):
modes.append("setup_" + mode_name())
@test_utils.run_in_graph_and_eager_modes
def testBody(self):
modes.append("run_" + mode_name())
e = ExampleTest()
e.setUp()
e.testBody()
self.assertEqual(modes[0:2], ["setup_eager", "run_eager"])
self.assertEqual(modes[2:], ["setup_graph", "run_graph"])
if __name__ == "__main__":
test_utils.test_main()
|
py | b4065bdd71f82b170e73d1ce1d7dcc12f7bce0b0 | """Limits of sequences"""
from sympy.calculus.accumulationbounds import AccumulationBounds
from sympy.core.add import Add
from sympy.core.function import PoleError
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.core.sympify import sympify
from sympy.functions.combinatorial.numbers import fibonacci
from sympy.functions.combinatorial.factorials import factorial, subfactorial
from sympy.functions.special.gamma_functions import gamma
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.miscellaneous import Max, Min
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.series.limits import Limit
def difference_delta(expr, n=None, step=1):
"""Difference Operator.
Explanation
===========
Discrete analog of differential operator. Given a sequence x[n],
returns the sequence x[n + step] - x[n].
Examples
========
>>> from sympy import difference_delta as dd
>>> from sympy.abc import n
>>> dd(n*(n + 1), n)
2*n + 2
>>> dd(n*(n + 1), n, 2)
4*n + 6
References
==========
.. [1] https://reference.wolfram.com/language/ref/DifferenceDelta.html
"""
expr = sympify(expr)
if n is None:
f = expr.free_symbols
if len(f) == 1:
n = f.pop()
elif len(f) == 0:
return S.Zero
else:
raise ValueError("Since there is more than one variable in the"
" expression, a variable must be supplied to"
" take the difference of %s" % expr)
step = sympify(step)
if step.is_number is False or step.is_finite is False:
raise ValueError("Step should be a finite number.")
if hasattr(expr, '_eval_difference_delta'):
result = expr._eval_difference_delta(n, step)
if result:
return result
return expr.subs(n, n + step) - expr
def dominant(expr, n):
"""Finds the dominant term in a sum, that is a term that dominates
every other term.
Explanation
===========
If limit(a/b, n, oo) is oo then a dominates b.
If limit(a/b, n, oo) is 0 then b dominates a.
Otherwise, a and b are comparable.
If there is no unique dominant term, then returns ``None``.
Examples
========
>>> from sympy import Sum
>>> from sympy.series.limitseq import dominant
>>> from sympy.abc import n, k
>>> dominant(5*n**3 + 4*n**2 + n + 1, n)
5*n**3
>>> dominant(2**n + Sum(k, (k, 0, n)), n)
2**n
See Also
========
sympy.series.limitseq.limit_seq
"""
terms = Add.make_args(expr.expand(func=True))
term0 = terms[-1]
comp = [term0] # comparable terms
for t in terms[:-1]:
e = (term0 / t).gammasimp()
l = limit_seq(e, n)
if l is None:
return None
elif l.is_zero:
term0 = t
comp = [term0]
elif l not in [S.Infinity, S.NegativeInfinity]:
comp.append(t)
if len(comp) > 1:
return None
return term0
def _limit_inf(expr, n):
try:
return Limit(expr, n, S.Infinity).doit(deep=False)
except (NotImplementedError, PoleError):
return None
def _limit_seq(expr, n, trials):
from sympy.concrete.summations import Sum
for i in range(trials):
if not expr.has(Sum):
result = _limit_inf(expr, n)
if result is not None:
return result
num, den = expr.as_numer_denom()
if not den.has(n) or not num.has(n):
result = _limit_inf(expr.doit(), n)
if result is not None:
return result
return None
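# Apply the difference operator to numerator and denominator separately
# (a discrete analogue of the Stolz-Cesaro / l'Hopital step used in Kauers'
# algorithm, see the reference in limit_seq) and retry with the
# simplified ratio.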
num, den = (difference_delta(t.expand(), n) for t in [num, den])
expr = (num / den).gammasimp()
if not expr.has(Sum):
result = _limit_inf(expr, n)
if result is not None:
return result
num, den = expr.as_numer_denom()
num = dominant(num, n)
if num is None:
return None
den = dominant(den, n)
if den is None:
return None
expr = (num / den).gammasimp()
def limit_seq(expr, n=None, trials=5):
"""Finds the limit of a sequence as index ``n`` tends to infinity.
Parameters
==========
expr : Expr
SymPy expression for the ``n-th`` term of the sequence
n : Symbol, optional
The index of the sequence, an integer that tends to positive
infinity. If None, inferred from the expression unless it has
multiple symbols.
trials: int, optional
The algorithm is highly recursive. ``trials`` is a safeguard from
infinite recursion in case the limit is not easily computed by the
algorithm. Try increasing ``trials`` if the algorithm returns ``None``.
Admissible Terms
================
The algorithm is designed for sequences built from rational functions,
indefinite sums, and indefinite products over an indeterminate n. Terms of
alternating sign are also allowed, but more complex oscillatory behavior is
not supported.
Examples
========
>>> from sympy import limit_seq, Sum, binomial
>>> from sympy.abc import n, k, m
>>> limit_seq((5*n**3 + 3*n**2 + 4) / (3*n**3 + 4*n - 5), n)
5/3
>>> limit_seq(binomial(2*n, n) / Sum(binomial(2*k, k), (k, 1, n)), n)
3/4
>>> limit_seq(Sum(k**2 * Sum(2**m/m, (m, 1, k)), (k, 1, n)) / (2**n*n), n)
4
See Also
========
sympy.series.limitseq.dominant
References
==========
.. [1] Computing Limits of Sequences - Manuel Kauers
"""
from sympy.concrete.summations import Sum
if n is None:
free = expr.free_symbols
if len(free) == 1:
n = free.pop()
elif not free:
return expr
else:
raise ValueError("Expression has more than one variable. "
"Please specify a variable.")
elif n not in expr.free_symbols:
return expr
expr = expr.rewrite(fibonacci, S.GoldenRatio)
expr = expr.rewrite(factorial, subfactorial, gamma)
n_ = Dummy("n", integer=True, positive=True)
n1 = Dummy("n", odd=True, positive=True)
n2 = Dummy("n", even=True, positive=True)
# If there is a negative term raised to a power involving n, or a
# trigonometric function, then consider even and odd n separately.
powers = (p.as_base_exp() for p in expr.atoms(Pow))
if (any(b.is_negative and e.has(n) for b, e in powers) or
expr.has(cos, sin)):
L1 = _limit_seq(expr.xreplace({n: n1}), n1, trials)
if L1 is not None:
L2 = _limit_seq(expr.xreplace({n: n2}), n2, trials)
if L1 != L2:
if L1.is_comparable and L2.is_comparable:
return AccumulationBounds(Min(L1, L2), Max(L1, L2))
else:
return None
else:
L1 = _limit_seq(expr.xreplace({n: n_}), n_, trials)
if L1 is not None:
return L1
else:
if expr.is_Add:
limits = [limit_seq(term, n, trials) for term in expr.args]
if any(result is None for result in limits):
return None
else:
return Add(*limits)
# Maybe the absolute value is easier to deal with (though not if
# it has a Sum). If it tends to 0, the limit is 0.
elif not expr.has(Sum):
lim = _limit_seq(Abs(expr.xreplace({n: n_})), n_, trials)
if lim is not None and lim.is_zero:
return S.Zero
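# Quick illustrative check (not part of SymPy's test suite): limit_seq
# reduces the indefinite sum via the difference operator above, e.g.
# Sum(k, (k, 0, n)) / n**2 tends to 1/2 as n -> oo.
def _limit_seq_example():  # pragma: no cover
    from sympy.abc import k, n
    from sympy.concrete.summations import Sum
    return limit_seq(Sum(k, (k, 0, n)) / n**2, n)  # S.Half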
|
py | b4065c22f5e375de7cb7d8b3f8c151ea54c8ba8b | from __future__ import print_function
__author__ = 'cpaulson'
import sys
sys.path.insert(0, '../')
import pyKriging
from pyKriging.regressionkrige import regression_kriging
from pyKriging.samplingplan import samplingplan
from pyKriging.krige import kriging
# The Kriging model starts by defining a sampling plan; we use an optimal Latin Hypercube here
sp = samplingplan(2)
X = sp.optimallhc(30)
# Next, we define the problem we would like to solve
testfun = pyKriging.testfunctions().branin_noise
# We generate our observed values based on our sampling plan and the test function
y = testfun(X)
print(X, y)
testfun = pyKriging.testfunctions().branin
print('Setting up the Kriging Model')
# Now that we have our initial data, we can create an instance of a kriging model
k = regression_kriging(X, y, testfunction=testfun, name='simple', testPoints=250)
k.train(optimizer='pso')
k1 = kriging(X, y, testfunction=testfun, name='simple', testPoints=250)
k1.train(optimizer='pso')
print(k.Lambda)
k.snapshot()
for i in range(1):
newpoints = k.infill(5)
for point in newpoints:
print('Adding point {}'.format(point))
newValue = testfun(point)[0]
k.addPoint(point, newValue)
k1.addPoint(point, newValue)
k.train()
k1.train()
# k.snapshot()
#
# And plot the model
print('Now plotting final results...')
print(k.Lambda)
k.plot(show=False)
k1.plot()
|
py | b4065c508ad97ef189dc642f14ae605f39b80cde | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ax.models.discrete.thompson import ThompsonSampler
from ax.utils.common.testutils import TestCase
class ThompsonSamplerTest(TestCase):
def setUp(self):
self.Xs = [[[1, 1], [2, 2], [3, 3], [4, 4]]] # 4 arms, each of dimensionality 2
self.Ys = [[1, 2, 3, 4]]
self.Yvars = [[1, 1, 1, 1]]
self.parameter_values = [[1, 2, 3, 4], [1, 2, 3, 4]]
self.outcome_names = ["x", "y"] # not used for regular TS
self.multiple_metrics_Xs = [
[[1, 1], [2, 2], [3, 3], [4, 4]],
[[1, 1], [2, 2], [3, 3], [4, 4]],
] # 2 metrics, 4 arms, each of dimensionality 2
self.multiple_metrics_Ys = [[1, 2, 3, 4], [0, 0, 0, 1]]
self.multiple_metrics_Yvars = [[1, 1, 1, 1], [1, 1, 1, 1]]
def testThompsonSampler(self):
generator = ThompsonSampler(min_weight=0.0)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
arms, weights, _ = generator.gen(
n=3, parameter_values=self.parameter_values, objective_weights=np.ones(1)
)
self.assertEqual(arms, [[4, 4], [3, 3], [2, 2]])
for weight, expected_weight in zip(weights, [0.725, 0.225, 0.05]):
self.assertAlmostEqual(weight, expected_weight, 1)
def testThompsonSamplerValidation(self):
generator = ThompsonSampler(min_weight=0.01)
# all Xs are not the same
with self.assertRaises(ValueError):
generator.fit(
Xs=[[[1, 1], [2, 2], [3, 3], [4, 4]], [[1, 1], [2, 2], [4, 4]]],
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
# multiple observations per parameterization
with self.assertRaises(ValueError):
generator.fit(
Xs=[[[1, 1], [2, 2], [2, 2]]],
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
# these are not the same observations, so should not error
generator.fit(
Xs=[[[1, 1], [2.0, 2], [2, 2]]],
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
# requires objective weights
with self.assertRaises(ValueError):
generator.gen(5, self.parameter_values, objective_weights=None)
def testThompsonSamplerMinWeight(self):
generator = ThompsonSampler(min_weight=0.01)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
arms, weights, _ = generator.gen(
n=5, parameter_values=self.parameter_values, objective_weights=np.ones(1)
)
self.assertEqual(arms, [[4, 4], [3, 3], [2, 2]])
for weight, expected_weight in zip(weights, [0.725, 0.225, 0.05]):
self.assertAlmostEqual(weight, expected_weight, 1)
def testThompsonSamplerUniformWeights(self):
generator = ThompsonSampler(min_weight=0.0, uniform_weights=True)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
arms, weights, _ = generator.gen(
n=3, parameter_values=self.parameter_values, objective_weights=np.ones(1)
)
self.assertEqual(arms, [[4, 4], [3, 3], [2, 2]])
for weight, expected_weight in zip(weights, [0.33, 0.33, 0.33]):
self.assertAlmostEqual(weight, expected_weight, 1)
def testThompsonSamplerInfeasible(self):
generator = ThompsonSampler(min_weight=0.9)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
with self.assertRaises(ValueError):
generator.gen(
n=3,
parameter_values=self.parameter_values,
objective_weights=np.ones(1),
)
def testThompsonSamplerOutcomeConstraints(self):
generator = ThompsonSampler(min_weight=0.0)
generator.fit(
Xs=self.multiple_metrics_Xs,
Ys=self.multiple_metrics_Ys,
Yvars=self.multiple_metrics_Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
arms, weights, _ = generator.gen(
n=4,
parameter_values=self.parameter_values,
objective_weights=np.array([1, 0]),
outcome_constraints=(
# pass in multiples of the same constraint
# to ensure that shapes are correct for multiple constraints
np.array([[0, 1], [0, 1], [0, 1]]),
np.array([[1], [1], [1]]),
),
)
self.assertEqual(arms, [[3, 3], [4, 4], [2, 2], [1, 1]])
for weight, expected_weight in zip(weights, [0.4, 0.4, 0.15, 0.05]):
self.assertAlmostEqual(weight, expected_weight, 1)
def testThompsonSamplerOutcomeConstraintsInfeasible(self):
generator = ThompsonSampler(min_weight=0.0)
generator.fit(
Xs=self.multiple_metrics_Xs,
Ys=self.multiple_metrics_Ys,
Yvars=self.multiple_metrics_Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
with self.assertRaises(ValueError):
generator.gen(
n=3,
parameter_values=self.parameter_values,
objective_weights=np.ones(2),
outcome_constraints=(np.array([[0, 1]]), np.array([[-10]])),
)
def testThompsonSamplerPredict(self):
generator = ThompsonSampler(min_weight=0.0)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
f, cov = generator.predict([[1, 1], [3, 3]])
self.assertTrue(np.array_equal(f, np.array([[1], [3]])))
self.assertTrue(np.array_equal(cov, np.ones((2, 1, 1))))
with self.assertRaises(ValueError):
generator.predict([[1, 2]])
|
py | b4065d855ff71a36b6252741e0242201e7b155e1 | """Test entity_registry API."""
import pytest
from homeassistant.components.config import entity_registry
from homeassistant.const import ATTR_ICON
from homeassistant.helpers.device_registry import DeviceEntryDisabler
from homeassistant.helpers.entity_registry import RegistryEntry, RegistryEntryDisabler
from tests.common import (
MockConfigEntry,
MockEntity,
MockEntityPlatform,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def client(hass, hass_ws_client):
"""Fixture that can interact with the config manager API."""
hass.loop.run_until_complete(entity_registry.async_setup(hass))
yield hass.loop.run_until_complete(hass_ws_client(hass))
@pytest.fixture
def device_registry(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
async def test_list_entities(hass, client):
"""Test list entries."""
mock_registry(
hass,
{
"test_domain.name": RegistryEntry(
entity_id="test_domain.name",
unique_id="1234",
platform="test_platform",
name="Hello World",
),
"test_domain.no_name": RegistryEntry(
entity_id="test_domain.no_name",
unique_id="6789",
platform="test_platform",
),
},
)
await client.send_json({"id": 5, "type": "config/entity_registry/list"})
msg = await client.receive_json()
assert msg["result"] == [
{
"config_entry_id": None,
"device_id": None,
"area_id": None,
"disabled_by": None,
"entity_id": "test_domain.name",
"name": "Hello World",
"icon": None,
"platform": "test_platform",
"entity_category": None,
},
{
"config_entry_id": None,
"device_id": None,
"area_id": None,
"disabled_by": None,
"entity_id": "test_domain.no_name",
"name": None,
"icon": None,
"platform": "test_platform",
"entity_category": None,
},
]
async def test_get_entity(hass, client):
"""Test get entry."""
mock_registry(
hass,
{
"test_domain.name": RegistryEntry(
entity_id="test_domain.name",
unique_id="1234",
platform="test_platform",
name="Hello World",
),
"test_domain.no_name": RegistryEntry(
entity_id="test_domain.no_name",
unique_id="6789",
platform="test_platform",
),
},
)
await client.send_json(
{"id": 5, "type": "config/entity_registry/get", "entity_id": "test_domain.name"}
)
msg = await client.receive_json()
assert msg["result"] == {
"area_id": None,
"capabilities": None,
"config_entry_id": None,
"device_class": None,
"device_id": None,
"disabled_by": None,
"entity_category": None,
"entity_id": "test_domain.name",
"icon": None,
"name": "Hello World",
"original_device_class": None,
"original_icon": None,
"original_name": None,
"platform": "test_platform",
"unique_id": "1234",
}
await client.send_json(
{
"id": 6,
"type": "config/entity_registry/get",
"entity_id": "test_domain.no_name",
}
)
msg = await client.receive_json()
assert msg["result"] == {
"area_id": None,
"capabilities": None,
"config_entry_id": None,
"device_class": None,
"device_id": None,
"disabled_by": None,
"entity_category": None,
"entity_id": "test_domain.no_name",
"icon": None,
"name": None,
"original_device_class": None,
"original_icon": None,
"original_name": None,
"platform": "test_platform",
"unique_id": "6789",
}
async def test_update_entity(hass, client):
"""Test updating entity."""
registry = mock_registry(
hass,
{
"test_domain.world": RegistryEntry(
entity_id="test_domain.world",
unique_id="1234",
# Using component.async_add_entities is equal to platform "domain"
platform="test_platform",
name="before update",
icon="icon:before update",
)
},
)
platform = MockEntityPlatform(hass)
entity = MockEntity(unique_id="1234")
await platform.async_add_entities([entity])
state = hass.states.get("test_domain.world")
assert state is not None
assert state.name == "before update"
assert state.attributes[ATTR_ICON] == "icon:before update"
# UPDATE AREA, DEVICE_CLASS, ICON AND NAME
await client.send_json(
{
"id": 6,
"type": "config/entity_registry/update",
"entity_id": "test_domain.world",
"area_id": "mock-area-id",
"device_class": "custom_device_class",
"icon": "icon:after update",
"name": "after update",
}
)
msg = await client.receive_json()
assert msg["result"] == {
"entity_entry": {
"area_id": "mock-area-id",
"capabilities": None,
"config_entry_id": None,
"device_class": "custom_device_class",
"device_id": None,
"disabled_by": None,
"entity_category": None,
"entity_id": "test_domain.world",
"icon": "icon:after update",
"name": "after update",
"original_device_class": None,
"original_icon": None,
"original_name": None,
"platform": "test_platform",
"unique_id": "1234",
}
}
state = hass.states.get("test_domain.world")
assert state.name == "after update"
assert state.attributes[ATTR_ICON] == "icon:after update"
# UPDATE DISABLED_BY TO USER
await client.send_json(
{
"id": 7,
"type": "config/entity_registry/update",
"entity_id": "test_domain.world",
"disabled_by": RegistryEntryDisabler.USER,
}
)
msg = await client.receive_json()
assert hass.states.get("test_domain.world") is None
assert (
registry.entities["test_domain.world"].disabled_by is RegistryEntryDisabler.USER
)
# UPDATE DISABLED_BY TO NONE
await client.send_json(
{
"id": 8,
"type": "config/entity_registry/update",
"entity_id": "test_domain.world",
"disabled_by": None,
}
)
msg = await client.receive_json()
assert msg["result"] == {
"entity_entry": {
"area_id": "mock-area-id",
"capabilities": None,
"config_entry_id": None,
"device_class": "custom_device_class",
"device_id": None,
"disabled_by": None,
"entity_category": None,
"entity_id": "test_domain.world",
"icon": "icon:after update",
"name": "after update",
"original_device_class": None,
"original_icon": None,
"original_name": None,
"platform": "test_platform",
"unique_id": "1234",
},
"reload_delay": 30,
}
async def test_update_entity_require_restart(hass, client):
"""Test updating entity."""
config_entry = MockConfigEntry(domain="test_platform")
config_entry.add_to_hass(hass)
mock_registry(
hass,
{
"test_domain.world": RegistryEntry(
config_entry_id=config_entry.entry_id,
entity_id="test_domain.world",
unique_id="1234",
# Using component.async_add_entities is equal to platform "domain"
platform="test_platform",
)
},
)
platform = MockEntityPlatform(hass)
entity = MockEntity(unique_id="1234")
await platform.async_add_entities([entity])
state = hass.states.get("test_domain.world")
assert state is not None
# UPDATE DISABLED_BY TO NONE
await client.send_json(
{
"id": 8,
"type": "config/entity_registry/update",
"entity_id": "test_domain.world",
"disabled_by": None,
}
)
msg = await client.receive_json()
assert msg["result"] == {
"entity_entry": {
"area_id": None,
"capabilities": None,
"config_entry_id": config_entry.entry_id,
"device_class": None,
"device_id": None,
"disabled_by": None,
"entity_category": None,
"entity_id": "test_domain.world",
"icon": None,
"name": None,
"original_device_class": None,
"original_icon": None,
"original_name": None,
"platform": "test_platform",
"unique_id": "1234",
},
"require_restart": True,
}
async def test_enable_entity_disabled_device(hass, client, device_registry):
"""Test enabling entity of disabled device."""
config_entry = MockConfigEntry(domain="test_platform")
config_entry.add_to_hass(hass)
device = device_registry.async_get_or_create(
config_entry_id="1234",
connections={("ethernet", "12:34:56:78:90:AB:CD:EF")},
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
disabled_by=DeviceEntryDisabler.USER,
)
mock_registry(
hass,
{
"test_domain.world": RegistryEntry(
config_entry_id=config_entry.entry_id,
entity_id="test_domain.world",
unique_id="1234",
# Using component.async_add_entities is equal to platform "domain"
platform="test_platform",
device_id=device.id,
)
},
)
platform = MockEntityPlatform(hass)
entity = MockEntity(unique_id="1234")
await platform.async_add_entities([entity])
state = hass.states.get("test_domain.world")
assert state is not None
# UPDATE DISABLED_BY TO NONE
await client.send_json(
{
"id": 8,
"type": "config/entity_registry/update",
"entity_id": "test_domain.world",
"disabled_by": None,
}
)
msg = await client.receive_json()
assert not msg["success"]
async def test_update_entity_no_changes(hass, client):
"""Test update entity with no changes."""
mock_registry(
hass,
{
"test_domain.world": RegistryEntry(
entity_id="test_domain.world",
unique_id="1234",
# Using component.async_add_entities is equal to platform "domain"
platform="test_platform",
name="name of entity",
)
},
)
platform = MockEntityPlatform(hass)
entity = MockEntity(unique_id="1234")
await platform.async_add_entities([entity])
state = hass.states.get("test_domain.world")
assert state is not None
assert state.name == "name of entity"
await client.send_json(
{
"id": 6,
"type": "config/entity_registry/update",
"entity_id": "test_domain.world",
"name": "name of entity",
}
)
msg = await client.receive_json()
assert msg["result"] == {
"entity_entry": {
"area_id": None,
"capabilities": None,
"config_entry_id": None,
"device_class": None,
"device_id": None,
"disabled_by": None,
"entity_category": None,
"entity_id": "test_domain.world",
"icon": None,
"name": "name of entity",
"original_device_class": None,
"original_icon": None,
"original_name": None,
"platform": "test_platform",
"unique_id": "1234",
}
}
state = hass.states.get("test_domain.world")
assert state.name == "name of entity"
async def test_get_nonexisting_entity(client):
"""Test get entry with nonexisting entity."""
await client.send_json(
{
"id": 6,
"type": "config/entity_registry/get",
"entity_id": "test_domain.no_name",
}
)
msg = await client.receive_json()
assert not msg["success"]
async def test_update_nonexisting_entity(client):
"""Test update a nonexisting entity."""
await client.send_json(
{
"id": 6,
"type": "config/entity_registry/update",
"entity_id": "test_domain.no_name",
"name": "new-name",
}
)
msg = await client.receive_json()
assert not msg["success"]
async def test_update_entity_id(hass, client):
"""Test update entity id."""
mock_registry(
hass,
{
"test_domain.world": RegistryEntry(
entity_id="test_domain.world",
unique_id="1234",
# Using component.async_add_entities is equal to platform "domain"
platform="test_platform",
)
},
)
platform = MockEntityPlatform(hass)
entity = MockEntity(unique_id="1234")
await platform.async_add_entities([entity])
assert hass.states.get("test_domain.world") is not None
await client.send_json(
{
"id": 6,
"type": "config/entity_registry/update",
"entity_id": "test_domain.world",
"new_entity_id": "test_domain.planet",
}
)
msg = await client.receive_json()
assert msg["result"] == {
"entity_entry": {
"area_id": None,
"capabilities": None,
"config_entry_id": None,
"device_class": None,
"device_id": None,
"disabled_by": None,
"entity_category": None,
"entity_id": "test_domain.planet",
"icon": None,
"name": None,
"original_device_class": None,
"original_icon": None,
"original_name": None,
"platform": "test_platform",
"unique_id": "1234",
}
}
assert hass.states.get("test_domain.world") is None
assert hass.states.get("test_domain.planet") is not None
async def test_update_existing_entity_id(hass, client):
"""Test update entity id to an already registered entity id."""
mock_registry(
hass,
{
"test_domain.world": RegistryEntry(
entity_id="test_domain.world",
unique_id="1234",
# Using component.async_add_entities is equal to platform "domain"
platform="test_platform",
),
"test_domain.planet": RegistryEntry(
entity_id="test_domain.planet",
unique_id="2345",
# Using component.async_add_entities is equal to platform "domain"
platform="test_platform",
),
},
)
platform = MockEntityPlatform(hass)
entities = [MockEntity(unique_id="1234"), MockEntity(unique_id="2345")]
await platform.async_add_entities(entities)
await client.send_json(
{
"id": 6,
"type": "config/entity_registry/update",
"entity_id": "test_domain.world",
"new_entity_id": "test_domain.planet",
}
)
msg = await client.receive_json()
assert not msg["success"]
async def test_update_invalid_entity_id(hass, client):
"""Test update entity id to an invalid entity id."""
mock_registry(
hass,
{
"test_domain.world": RegistryEntry(
entity_id="test_domain.world",
unique_id="1234",
# Using component.async_add_entities is equal to platform "domain"
platform="test_platform",
)
},
)
platform = MockEntityPlatform(hass)
entities = [MockEntity(unique_id="1234"), MockEntity(unique_id="2345")]
await platform.async_add_entities(entities)
await client.send_json(
{
"id": 6,
"type": "config/entity_registry/update",
"entity_id": "test_domain.world",
"new_entity_id": "another_domain.planet",
}
)
msg = await client.receive_json()
assert not msg["success"]
async def test_remove_entity(hass, client):
"""Test removing entity."""
registry = mock_registry(
hass,
{
"test_domain.world": RegistryEntry(
entity_id="test_domain.world",
unique_id="1234",
# Using component.async_add_entities is equal to platform "domain"
platform="test_platform",
name="before update",
)
},
)
await client.send_json(
{
"id": 6,
"type": "config/entity_registry/remove",
"entity_id": "test_domain.world",
}
)
msg = await client.receive_json()
assert msg["success"]
assert len(registry.entities) == 0
async def test_remove_non_existing_entity(hass, client):
"""Test removing non existing entity."""
mock_registry(hass, {})
await client.send_json(
{
"id": 6,
"type": "config/entity_registry/remove",
"entity_id": "test_domain.world",
}
)
msg = await client.receive_json()
assert not msg["success"]
|
py | b4065e51d5f6cded1cdfd6b5464ecc0917d25022 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import os
import pickle
import re
from devil.android import apk_helper
from pylib import constants
from pylib.base import base_test_result
from pylib.base import test_exception
from pylib.base import test_instance
from pylib.constants import host_paths
from pylib.instrumentation import test_result
from pylib.instrumentation import instrumentation_parser
from pylib.symbols import deobfuscator
from pylib.symbols import stack_symbolizer
from pylib.utils import dexdump
from pylib.utils import instrumentation_tracing
from pylib.utils import proguard
from pylib.utils import shared_preference_utils
from pylib.utils import test_filter
with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
import unittest_util # pylint: disable=import-error
# Ref: http://developer.android.com/reference/android/app/Activity.html
_ACTIVITY_RESULT_CANCELED = 0
_ACTIVITY_RESULT_OK = -1
_COMMAND_LINE_PARAMETER = 'cmdlinearg-parameter'
_DEFAULT_ANNOTATIONS = [
'SmallTest', 'MediumTest', 'LargeTest', 'EnormousTest', 'IntegrationTest']
_EXCLUDE_UNLESS_REQUESTED_ANNOTATIONS = [
'DisabledTest', 'FlakyTest', 'Manual']
_VALID_ANNOTATIONS = set(_DEFAULT_ANNOTATIONS +
_EXCLUDE_UNLESS_REQUESTED_ANNOTATIONS)
_EXTRA_DRIVER_TEST_LIST = (
'org.chromium.test.driver.OnDeviceInstrumentationDriver.TestList')
_EXTRA_DRIVER_TEST_LIST_FILE = (
'org.chromium.test.driver.OnDeviceInstrumentationDriver.TestListFile')
_EXTRA_DRIVER_TARGET_PACKAGE = (
'org.chromium.test.driver.OnDeviceInstrumentationDriver.TargetPackage')
_EXTRA_DRIVER_TARGET_CLASS = (
'org.chromium.test.driver.OnDeviceInstrumentationDriver.TargetClass')
_EXTRA_TIMEOUT_SCALE = (
'org.chromium.test.driver.OnDeviceInstrumentationDriver.TimeoutScale')
_TEST_LIST_JUNIT4_RUNNERS = [
'org.chromium.base.test.BaseChromiumAndroidJUnitRunner']
_SKIP_PARAMETERIZATION = 'SkipCommandLineParameterization'
_PARAMETERIZED_COMMAND_LINE_FLAGS = 'ParameterizedCommandLineFlags'
_PARAMETERIZED_COMMAND_LINE_FLAGS_SWITCHES = (
'ParameterizedCommandLineFlags$Switches')
_NATIVE_CRASH_RE = re.compile('(process|native) crash', re.IGNORECASE)
_PICKLE_FORMAT_VERSION = 12
class MissingSizeAnnotationError(test_exception.TestException):
def __init__(self, class_name):
super(MissingSizeAnnotationError, self).__init__(class_name +
': Test method is missing required size annotation. Add one of: ' +
', '.join('@' + a for a in _VALID_ANNOTATIONS))
class CommandLineParameterizationException(test_exception.TestException):
def __init__(self, msg):
super(CommandLineParameterizationException, self).__init__(msg)
class TestListPickleException(test_exception.TestException):
pass
# TODO(jbudorick): Make these private class methods of
# InstrumentationTestInstance once the instrumentation junit3_runner_class is
# deprecated.
def ParseAmInstrumentRawOutput(raw_output):
"""Parses the output of an |am instrument -r| call.
Args:
raw_output: the output of an |am instrument -r| call as a list of lines
Returns:
A 3-tuple containing:
- the instrumentation code as an integer
- the instrumentation result as a list of lines
- the instrumentation statuses received as a list of 2-tuples
containing:
- the status code as an integer
- the bundle dump as a dict mapping string keys to a list of
strings, one for each line.
"""
parser = instrumentation_parser.InstrumentationParser(raw_output)
statuses = list(parser.IterStatus())
code, bundle = parser.GetResult()
return (code, bundle, statuses)
def GenerateTestResults(
result_code, result_bundle, statuses, start_ms, duration_ms, device_abi,
symbolizer):
"""Generate test results from |statuses|.
Args:
result_code: The overall status code as an integer.
result_bundle: The summary bundle dump as a dict.
statuses: A list of 2-tuples containing:
- the status code as an integer
- the bundle dump as a dict mapping string keys to string values
Note that this is the same as the third item in the 3-tuple returned by
|_ParseAmInstrumentRawOutput|.
start_ms: The start time of the test in milliseconds.
duration_ms: The duration of the test in milliseconds.
device_abi: The device_abi, which is needed for symbolization.
symbolizer: The symbolizer used to symbolize stack.
Returns:
A list containing an instance of InstrumentationTestResult for each test
parsed.
"""
results = []
current_result = None
for status_code, bundle in statuses:
test_class = bundle.get('class', '')
test_method = bundle.get('test', '')
if test_class and test_method:
test_name = '%s#%s' % (test_class, test_method)
else:
continue
if status_code == instrumentation_parser.STATUS_CODE_START:
if current_result:
results.append(current_result)
current_result = test_result.InstrumentationTestResult(
test_name, base_test_result.ResultType.UNKNOWN, start_ms, duration_ms)
else:
if status_code == instrumentation_parser.STATUS_CODE_OK:
if bundle.get('test_skipped', '').lower() in ('true', '1', 'yes'):
current_result.SetType(base_test_result.ResultType.SKIP)
elif current_result.GetType() == base_test_result.ResultType.UNKNOWN:
current_result.SetType(base_test_result.ResultType.PASS)
elif status_code == instrumentation_parser.STATUS_CODE_SKIP:
current_result.SetType(base_test_result.ResultType.SKIP)
elif status_code == instrumentation_parser.STATUS_CODE_ASSUMPTION_FAILURE:
current_result.SetType(base_test_result.ResultType.SKIP)
else:
if status_code not in (instrumentation_parser.STATUS_CODE_ERROR,
instrumentation_parser.STATUS_CODE_FAILURE):
logging.error('Unrecognized status code %d. Handling as an error.',
status_code)
current_result.SetType(base_test_result.ResultType.FAIL)
if 'stack' in bundle:
if symbolizer and device_abi:
current_result.SetLog(
'%s\n%s' % (
bundle['stack'],
'\n'.join(symbolizer.ExtractAndResolveNativeStackTraces(
bundle['stack'], device_abi))))
else:
current_result.SetLog(bundle['stack'])
if current_result:
if current_result.GetType() == base_test_result.ResultType.UNKNOWN:
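# If the activity result was canceled and the summary bundle mentions a
# native/process crash, treat this still-unknown result as a crash (the
# app likely died mid-test before reporting a terminal status).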
crashed = (result_code == _ACTIVITY_RESULT_CANCELED
and any(_NATIVE_CRASH_RE.search(l)
for l in result_bundle.itervalues()))
if crashed:
current_result.SetType(base_test_result.ResultType.CRASH)
results.append(current_result)
return results
def FilterTests(tests, filter_str=None, annotations=None,
excluded_annotations=None):
"""Filter a list of tests
Args:
tests: a list of tests. e.g. [
{'annotations": {}, 'class': 'com.example.TestA', 'method':'test1'},
{'annotations": {}, 'class': 'com.example.TestB', 'method':'test2'}]
filter_str: googletest-style filter string.
annotations: a dict of wanted annotations for test methods.
excluded_annotations: a dict of annotations to exclude.
Return:
A list of filtered tests
"""
def gtest_filter(t):
if not filter_str:
return True
# Allow fully-qualified name as well as an omitted package.
unqualified_class_test = {
'class': t['class'].split('.')[-1],
'method': t['method']
}
names = [
GetTestName(t, sep='.'),
GetTestName(unqualified_class_test, sep='.'),
GetUniqueTestName(t, sep='.')
]
if t['is_junit4']:
names += [
GetTestNameWithoutParameterPostfix(t, sep='.'),
GetTestNameWithoutParameterPostfix(unqualified_class_test, sep='.')
]
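# googletest-style filters have the form 'positive_patterns[-negative_patterns]';
# a test matching any negative pattern is excluded even if it also matches a
# positive pattern.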
pattern_groups = filter_str.split('-')
if len(pattern_groups) > 1:
negative_filter = pattern_groups[1]
if unittest_util.FilterTestNames(names, negative_filter):
return []
positive_filter = pattern_groups[0]
return unittest_util.FilterTestNames(names, positive_filter)
def annotation_filter(all_annotations):
if not annotations:
return True
return any_annotation_matches(annotations, all_annotations)
def excluded_annotation_filter(all_annotations):
if not excluded_annotations:
return True
return not any_annotation_matches(excluded_annotations,
all_annotations)
def any_annotation_matches(filter_annotations, all_annotations):
return any(
ak in all_annotations
and annotation_value_matches(av, all_annotations[ak])
for ak, av in filter_annotations)
def annotation_value_matches(filter_av, av):
if filter_av is None:
return True
elif isinstance(av, dict):
tav_from_dict = av['value']
# If tav_from_dict is an int, the 'in' operator breaks, so convert
# filter_av and manually compare. See https://crbug.com/1019707
if isinstance(tav_from_dict, int):
return int(filter_av) == tav_from_dict
else:
return filter_av in tav_from_dict
elif isinstance(av, list):
return filter_av in av
return filter_av == av
filtered_tests = []
for t in tests:
# Gtest filtering
if not gtest_filter(t):
continue
# Enforce that all tests declare their size.
if not any(a in _VALID_ANNOTATIONS for a in t['annotations']):
raise MissingSizeAnnotationError(GetTestName(t))
if (not annotation_filter(t['annotations'])
or not excluded_annotation_filter(t['annotations'])):
continue
filtered_tests.append(t)
return filtered_tests
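# Minimal sketch of FilterTests with a googletest-style filter string; the
# class, method, and annotation values below are hypothetical.
def _FilterTestsExample():  # pragma: no cover
  tests = [{
      'annotations': {'SmallTest': None},
      'class': 'org.chromium.example.FooTest',
      'method': 'testBar',
      'is_junit4': True,
  }]
  # Matches against the fully-qualified dotted name; with no annotation
  # filters given, any test declaring a valid size annotation passes.
  return FilterTests(tests, filter_str='*FooTest.testBar*')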
# TODO(yolandyan): remove this once the tests are converted to junit4
def GetAllTestsFromJar(test_jar):
pickle_path = '%s-proguard.pickle' % test_jar
try:
tests = GetTestsFromPickle(pickle_path, os.path.getmtime(test_jar))
except TestListPickleException as e:
logging.info('Could not get tests from pickle: %s', e)
logging.info('Getting tests from JAR via proguard.')
tests = _GetTestsFromProguard(test_jar)
SaveTestsToPickle(pickle_path, tests)
return tests
def GetAllTestsFromApk(test_apk):
pickle_path = '%s-dexdump.pickle' % test_apk
try:
tests = GetTestsFromPickle(pickle_path, os.path.getmtime(test_apk))
except TestListPickleException as e:
logging.info('Could not get tests from pickle: %s', e)
logging.info('Getting tests from dex via dexdump.')
tests = _GetTestsFromDexdump(test_apk)
SaveTestsToPickle(pickle_path, tests)
return tests
def GetTestsFromPickle(pickle_path, test_mtime):
if not os.path.exists(pickle_path):
raise TestListPickleException('%s does not exist.' % pickle_path)
if os.path.getmtime(pickle_path) <= test_mtime:
raise TestListPickleException('File is stale: %s' % pickle_path)
with open(pickle_path, 'r') as f:
pickle_data = pickle.load(f)
if pickle_data['VERSION'] != _PICKLE_FORMAT_VERSION:
raise TestListPickleException('PICKLE_FORMAT_VERSION has changed.')
return pickle_data['TEST_METHODS']
# TODO(yolandyan): remove this once the test listing from java runner lands
@instrumentation_tracing.no_tracing
def _GetTestsFromProguard(jar_path):
p = proguard.Dump(jar_path)
class_lookup = dict((c['class'], c) for c in p['classes'])
def is_test_class(c):
return c['class'].endswith('Test')
def is_test_method(m):
return m['method'].startswith('test')
def recursive_class_annotations(c):
s = c['superclass']
if s in class_lookup:
a = recursive_class_annotations(class_lookup[s])
else:
a = {}
a.update(c['annotations'])
return a
def stripped_test_class(c):
return {
'class': c['class'],
'annotations': recursive_class_annotations(c),
'methods': [m for m in c['methods'] if is_test_method(m)],
'superclass': c['superclass'],
}
return [stripped_test_class(c) for c in p['classes']
if is_test_class(c)]
def _GetTestsFromDexdump(test_apk):
dump = dexdump.Dump(test_apk)
tests = []
def get_test_methods(methods):
return [
{
'method': m,
# No annotation info is available from dexdump.
# Set MediumTest annotation for default.
'annotations': {'MediumTest': None},
} for m in methods if m.startswith('test')]
for package_name, package_info in dump.iteritems():
for class_name, class_info in package_info['classes'].iteritems():
if class_name.endswith('Test'):
tests.append({
'class': '%s.%s' % (package_name, class_name),
'annotations': {},
'methods': get_test_methods(class_info['methods']),
'superclass': class_info['superclass'],
})
return tests
def SaveTestsToPickle(pickle_path, tests):
pickle_data = {
'VERSION': _PICKLE_FORMAT_VERSION,
'TEST_METHODS': tests,
}
with open(pickle_path, 'w') as pickle_file:
pickle.dump(pickle_data, pickle_file)
class MissingJUnit4RunnerException(test_exception.TestException):
"""Raised when JUnit4 runner is not provided or specified in apk manifest"""
def __init__(self):
super(MissingJUnit4RunnerException, self).__init__(
'JUnit4 runner is not provided or specified in test apk manifest.')
def GetTestName(test, sep='#'):
"""Gets the name of the given test.
Note that this may return the same name for more than one test, e.g. if a
test is being run multiple times with different parameters.
Args:
test: the instrumentation test dict.
sep: the character(s) that should join the class name and the method name.
Returns:
The test name as a string.
"""
test_name = '%s%s%s' % (test['class'], sep, test['method'])
assert ' *-:' not in test_name, (
'The test name must not contain any of the characters in " *-:". See '
'https://crbug.com/912199')
return test_name
def GetTestNameWithoutParameterPostfix(
test, sep='#', parameterization_sep='__'):
"""Gets the name of the given JUnit4 test without parameter postfix.
For most WebView JUnit4 javatests, each test is parameterized with
"__sandboxed_mode" to run in both non-sandboxed mode and sandboxed mode.
This function returns the name of the test without parameterization
so test filters can match both parameterized and non-parameterized tests.
Args:
test: the instrumentation test dict.
sep: the character(s) that should join the class name and the method name.
parameterization_sep: the character(s) that separate the method name and the
parameterization postfix.
Returns:
The test name without parameter postfix as a string.
"""
name = GetTestName(test, sep=sep)
return name.split(parameterization_sep)[0]
def GetUniqueTestName(test, sep='#'):
"""Gets the unique name of the given test.
This will include text to disambiguate between tests for which GetTestName
would return the same name.
Args:
test: the instrumentation test dict.
sep: the character(s) that should join the class name and the method name.
Returns:
The unique test name as a string.
"""
display_name = GetTestName(test, sep=sep)
if test.get('flags', [None])[0]:
sanitized_flags = [x.replace('-', '_') for x in test['flags']]
display_name = '%s_with_%s' % (display_name, '_'.join(sanitized_flags))
assert ' *-:' not in display_name, (
'The test name must not contain any of the characters in " *-:". See '
'https://crbug.com/912199')
return display_name
class InstrumentationTestInstance(test_instance.TestInstance):
def __init__(self, args, data_deps_delegate, error_func):
super(InstrumentationTestInstance, self).__init__()
self._additional_apks = []
self._apk_under_test = None
self._apk_under_test_incremental_install_json = None
self._modules = None
self._fake_modules = None
self._package_info = None
self._suite = None
self._test_apk = None
self._test_apk_incremental_install_json = None
self._test_jar = None
self._test_package = None
self._junit3_runner_class = None
self._junit4_runner_class = None
self._junit4_runner_supports_listing = None
self._test_support_apk = None
self._initializeApkAttributes(args, error_func)
self._data_deps = None
self._data_deps_delegate = None
self._runtime_deps_path = None
self._initializeDataDependencyAttributes(args, data_deps_delegate)
self._annotations = None
self._excluded_annotations = None
self._test_filter = None
self._initializeTestFilterAttributes(args)
self._flags = None
self._use_apk_under_test_flags_file = False
self._initializeFlagAttributes(args)
self._driver_apk = None
self._driver_package = None
self._driver_name = None
self._initializeDriverAttributes()
self._screenshot_dir = None
self._timeout_scale = None
self._wait_for_java_debugger = None
self._initializeTestControlAttributes(args)
self._coverage_directory = None
self._initializeTestCoverageAttributes(args)
self._store_tombstones = False
self._symbolizer = None
self._enable_java_deobfuscation = False
self._deobfuscator = None
self._initializeLogAttributes(args)
self._edit_shared_prefs = []
self._initializeEditPrefsAttributes(args)
self._replace_system_package = None
self._initializeReplaceSystemPackageAttributes(args)
self._use_webview_provider = None
self._initializeUseWebviewProviderAttributes(args)
self._external_shard_index = args.test_launcher_shard_index
self._total_external_shards = args.test_launcher_total_shards
def _initializeApkAttributes(self, args, error_func):
if args.apk_under_test:
apk_under_test_path = args.apk_under_test
if (not args.apk_under_test.endswith('.apk')
and not args.apk_under_test.endswith('.apks')):
apk_under_test_path = os.path.join(
constants.GetOutDirectory(), constants.SDK_BUILD_APKS_DIR,
'%s.apk' % args.apk_under_test)
# TODO(jbudorick): Move the realpath up to the argument parser once
# APK-by-name is no longer supported.
apk_under_test_path = os.path.realpath(apk_under_test_path)
if not os.path.exists(apk_under_test_path):
error_func('Unable to find APK under test: %s' % apk_under_test_path)
self._apk_under_test = apk_helper.ToHelper(apk_under_test_path)
test_apk_path = args.test_apk
if not os.path.exists(test_apk_path):
test_apk_path = os.path.join(
constants.GetOutDirectory(), constants.SDK_BUILD_APKS_DIR,
'%s.apk' % args.test_apk)
# TODO(jbudorick): Move the realpath up to the argument parser once
# APK-by-name is no longer supported.
test_apk_path = os.path.realpath(test_apk_path)
if not os.path.exists(test_apk_path):
error_func('Unable to find test APK: %s' % test_apk_path)
self._test_apk = apk_helper.ToHelper(test_apk_path)
self._suite = os.path.splitext(os.path.basename(args.test_apk))[0]
self._apk_under_test_incremental_install_json = (
args.apk_under_test_incremental_install_json)
self._test_apk_incremental_install_json = (
args.test_apk_incremental_install_json)
if self._test_apk_incremental_install_json:
assert self._suite.endswith('_incremental')
self._suite = self._suite[:-len('_incremental')]
self._modules = args.modules
self._fake_modules = args.fake_modules
self._test_jar = args.test_jar
self._test_support_apk = apk_helper.ToHelper(os.path.join(
constants.GetOutDirectory(), constants.SDK_BUILD_TEST_JAVALIB_DIR,
'%sSupport.apk' % self._suite))
if not self._test_jar:
logging.warning('Test jar not specified. Test runner will not have '
'Java annotation info available. May not handle test '
'timeouts correctly.')
elif not os.path.exists(self._test_jar):
error_func('Unable to find test JAR: %s' % self._test_jar)
self._test_package = self._test_apk.GetPackageName()
all_instrumentations = self._test_apk.GetAllInstrumentations()
all_junit3_runner_classes = [
x for x in all_instrumentations if ('0xffffffff' in x.get(
'chromium-junit3', ''))]
all_junit4_runner_classes = [
x for x in all_instrumentations if ('0xffffffff' not in x.get(
'chromium-junit3', ''))]
if len(all_junit3_runner_classes) > 1:
logging.warning('This test apk has more than one JUnit3 instrumentation')
if len(all_junit4_runner_classes) > 1:
logging.warning('This test apk has more than one JUnit4 instrumentation')
self._junit3_runner_class = (
all_junit3_runner_classes[0]['android:name']
if all_junit3_runner_classes else self.test_apk.GetInstrumentationName())
self._junit4_runner_class = (
all_junit4_runner_classes[0]['android:name']
if all_junit4_runner_classes else None)
if self._junit4_runner_class:
if self._test_apk_incremental_install_json:
self._junit4_runner_supports_listing = next(
(True for x in self._test_apk.GetAllMetadata()
if 'real-instr' in x[0] and x[1] in _TEST_LIST_JUNIT4_RUNNERS),
False)
else:
self._junit4_runner_supports_listing = (
self._junit4_runner_class in _TEST_LIST_JUNIT4_RUNNERS)
self._package_info = None
if self._apk_under_test:
package_under_test = self._apk_under_test.GetPackageName()
for package_info in constants.PACKAGE_INFO.itervalues():
if package_under_test == package_info.package:
self._package_info = package_info
break
if not self._package_info:
logging.warning('Unable to find package info for %s', self._test_package)
for apk in args.additional_apks:
if not os.path.exists(apk):
error_func('Unable to find additional APK: %s' % apk)
self._additional_apks = (
[apk_helper.ToHelper(x) for x in args.additional_apks])
def _initializeDataDependencyAttributes(self, args, data_deps_delegate):
self._data_deps = []
self._data_deps_delegate = data_deps_delegate
self._runtime_deps_path = args.runtime_deps_path
if not self._runtime_deps_path:
logging.warning('No data dependencies will be pushed.')
def _initializeTestFilterAttributes(self, args):
self._test_filter = test_filter.InitializeFilterFromArgs(args)
def annotation_element(a):
a = a.split('=', 1)
return (a[0], a[1] if len(a) == 2 else None)
if args.annotation_str:
self._annotations = [
annotation_element(a) for a in args.annotation_str.split(',')]
elif not self._test_filter:
self._annotations = [
annotation_element(a) for a in _DEFAULT_ANNOTATIONS]
else:
self._annotations = []
if args.exclude_annotation_str:
self._excluded_annotations = [
annotation_element(a) for a in args.exclude_annotation_str.split(',')]
else:
self._excluded_annotations = []
requested_annotations = set(a[0] for a in self._annotations)
if not args.run_disabled:
self._excluded_annotations.extend(
annotation_element(a) for a in _EXCLUDE_UNLESS_REQUESTED_ANNOTATIONS
if a not in requested_annotations)
def _initializeFlagAttributes(self, args):
self._use_apk_under_test_flags_file = args.use_apk_under_test_flags_file
self._flags = ['--enable-test-intents']
if args.command_line_flags:
self._flags.extend(args.command_line_flags)
if args.device_flags_file:
with open(args.device_flags_file) as device_flags_file:
stripped_lines = (l.strip() for l in device_flags_file)
self._flags.extend(flag for flag in stripped_lines if flag)
if args.strict_mode and args.strict_mode != 'off' and (
# TODO(yliuyliu): Turn on strict mode for coverage once
# crbug/1006397 is fixed.
not args.coverage_dir):
self._flags.append('--strict-mode=' + args.strict_mode)
def _initializeDriverAttributes(self):
self._driver_apk = os.path.join(
constants.GetOutDirectory(), constants.SDK_BUILD_APKS_DIR,
'OnDeviceInstrumentationDriver.apk')
if os.path.exists(self._driver_apk):
driver_apk = apk_helper.ApkHelper(self._driver_apk)
self._driver_package = driver_apk.GetPackageName()
self._driver_name = driver_apk.GetInstrumentationName()
else:
self._driver_apk = None
def _initializeTestControlAttributes(self, args):
self._screenshot_dir = args.screenshot_dir
self._timeout_scale = args.timeout_scale or 1
self._wait_for_java_debugger = args.wait_for_java_debugger
def _initializeTestCoverageAttributes(self, args):
self._coverage_directory = args.coverage_dir
def _initializeLogAttributes(self, args):
self._enable_java_deobfuscation = args.enable_java_deobfuscation
self._store_tombstones = args.store_tombstones
self._symbolizer = stack_symbolizer.Symbolizer(
self.apk_under_test.path if self.apk_under_test else None)
def _initializeEditPrefsAttributes(self, args):
if not hasattr(args, 'shared_prefs_file') or not args.shared_prefs_file:
return
if not isinstance(args.shared_prefs_file, str):
logging.warning("Given non-string for a filepath")
return
self._edit_shared_prefs = shared_preference_utils.ExtractSettingsFromJson(
args.shared_prefs_file)
def _initializeReplaceSystemPackageAttributes(self, args):
if (not hasattr(args, 'replace_system_package')
or not args.replace_system_package):
return
self._replace_system_package = args.replace_system_package
def _initializeUseWebviewProviderAttributes(self, args):
if (not hasattr(args, 'use_webview_provider')
or not args.use_webview_provider):
return
self._use_webview_provider = args.use_webview_provider
@property
def additional_apks(self):
return self._additional_apks
@property
def apk_under_test(self):
return self._apk_under_test
@property
def apk_under_test_incremental_install_json(self):
return self._apk_under_test_incremental_install_json
@property
def modules(self):
return self._modules
@property
def fake_modules(self):
return self._fake_modules
@property
def coverage_directory(self):
return self._coverage_directory
@property
def driver_apk(self):
return self._driver_apk
@property
def driver_package(self):
return self._driver_package
@property
def driver_name(self):
return self._driver_name
@property
def edit_shared_prefs(self):
return self._edit_shared_prefs
@property
def external_shard_index(self):
return self._external_shard_index
@property
def flags(self):
return self._flags
@property
def junit3_runner_class(self):
return self._junit3_runner_class
@property
def junit4_runner_class(self):
return self._junit4_runner_class
@property
def junit4_runner_supports_listing(self):
return self._junit4_runner_supports_listing
@property
def package_info(self):
return self._package_info
@property
def replace_system_package(self):
return self._replace_system_package
@property
def use_webview_provider(self):
return self._use_webview_provider
@property
def screenshot_dir(self):
return self._screenshot_dir
@property
def store_tombstones(self):
return self._store_tombstones
@property
def suite(self):
return self._suite
@property
def symbolizer(self):
return self._symbolizer
@property
def test_apk(self):
return self._test_apk
@property
def test_apk_incremental_install_json(self):
return self._test_apk_incremental_install_json
@property
def test_jar(self):
return self._test_jar
@property
def test_support_apk(self):
return self._test_support_apk
@property
def test_package(self):
return self._test_package
@property
def timeout_scale(self):
return self._timeout_scale
@property
def total_external_shards(self):
return self._total_external_shards
@property
def use_apk_under_test_flags_file(self):
return self._use_apk_under_test_flags_file
@property
def wait_for_java_debugger(self):
return self._wait_for_java_debugger
#override
def TestType(self):
return 'instrumentation'
#override
def GetPreferredAbis(self):
# We could alternatively take the intersection of what they all support,
# but it should never be the case that they support different things.
apks = [self._test_apk, self._apk_under_test] + self._additional_apks
for apk in apks:
if apk:
ret = apk.GetAbis()
if ret:
return ret
return []
#override
def SetUp(self):
self._data_deps.extend(
self._data_deps_delegate(self._runtime_deps_path))
if self._enable_java_deobfuscation:
self._deobfuscator = deobfuscator.DeobfuscatorPool(
self.test_apk.path + '.mapping')
def GetDataDependencies(self):
return self._data_deps
def GetTests(self):
if self.test_jar:
raw_tests = GetAllTestsFromJar(self.test_jar)
else:
raw_tests = GetAllTestsFromApk(self.test_apk.path)
return self.ProcessRawTests(raw_tests)
def MaybeDeobfuscateLines(self, lines):
if not self._deobfuscator:
return lines
return self._deobfuscator.TransformLines(lines)
def ProcessRawTests(self, raw_tests):
inflated_tests = self._ParameterizeTestsWithFlags(
self._InflateTests(raw_tests))
if self._junit4_runner_class is None and any(
t['is_junit4'] for t in inflated_tests):
raise MissingJUnit4RunnerException()
filtered_tests = FilterTests(
inflated_tests, self._test_filter, self._annotations,
self._excluded_annotations)
if self._test_filter and not filtered_tests:
for t in inflated_tests:
logging.debug(' %s', GetUniqueTestName(t))
logging.warning('Unmatched Filter: %s', self._test_filter)
return filtered_tests
# pylint: disable=no-self-use
def _InflateTests(self, tests):
inflated_tests = []
for c in tests:
for m in c['methods']:
a = dict(c['annotations'])
a.update(m['annotations'])
inflated_tests.append({
'class': c['class'],
'method': m['method'],
'annotations': a,
'is_junit4': c['superclass'] == 'java.lang.Object'
})
return inflated_tests
def _ParameterizeTestsWithFlags(self, tests):
def _checkParameterization(annotations):
types = [
_PARAMETERIZED_COMMAND_LINE_FLAGS_SWITCHES,
_PARAMETERIZED_COMMAND_LINE_FLAGS,
]
if types[0] in annotations and types[1] in annotations:
raise CommandLineParameterizationException(
'Multiple command-line parameterization types: {}.'.format(
', '.join(types)))
def _switchesToFlags(switches):
return ['--{}'.format(s) for s in switches if s]
def _annotationToSwitches(clazz, methods):
if clazz == _PARAMETERIZED_COMMAND_LINE_FLAGS_SWITCHES:
return [methods['value']]
elif clazz == _PARAMETERIZED_COMMAND_LINE_FLAGS:
list_of_switches = []
for annotation in methods['value']:
for clazz, methods in annotation.iteritems():
list_of_switches += _annotationToSwitches(clazz, methods)
return list_of_switches
else:
return []
def _setTestFlags(test, flags):
if flags:
test['flags'] = flags
elif 'flags' in test:
del test['flags']
new_tests = []
for t in tests:
annotations = t['annotations']
list_of_switches = []
_checkParameterization(annotations)
if _SKIP_PARAMETERIZATION not in annotations:
for clazz, methods in annotations.iteritems():
list_of_switches += _annotationToSwitches(clazz, methods)
if list_of_switches:
_setTestFlags(t, _switchesToFlags(list_of_switches[0]))
for p in list_of_switches[1:]:
parameterized_t = copy.copy(t)
_setTestFlags(parameterized_t, _switchesToFlags(p))
new_tests.append(parameterized_t)
return tests + new_tests
def GetDriverEnvironmentVars(
self, test_list=None, test_list_file_path=None):
env = {
_EXTRA_DRIVER_TARGET_PACKAGE: self.test_package,
_EXTRA_DRIVER_TARGET_CLASS: self.junit3_runner_class,
_EXTRA_TIMEOUT_SCALE: self._timeout_scale,
}
if test_list:
env[_EXTRA_DRIVER_TEST_LIST] = ','.join(test_list)
if test_list_file_path:
env[_EXTRA_DRIVER_TEST_LIST_FILE] = (
os.path.basename(test_list_file_path))
return env
@staticmethod
def ParseAmInstrumentRawOutput(raw_output):
return ParseAmInstrumentRawOutput(raw_output)
@staticmethod
def GenerateTestResults(
result_code, result_bundle, statuses, start_ms, duration_ms,
device_abi, symbolizer):
return GenerateTestResults(result_code, result_bundle, statuses,
start_ms, duration_ms, device_abi, symbolizer)
#override
def TearDown(self):
self.symbolizer.CleanUp()
if self._deobfuscator:
self._deobfuscator.Close()
self._deobfuscator = None
|
py | b4065e891f117aa53f0e47b2d8686b3aa1307ce4 | from AppKit import *
from PyObjCTools import AppHelper
import objc
objc.setVerbose(1)
# Import all submodules, to make sure all
# classes are known to the runtime
import CalendarMatrix
import InfoWindowController
import SelectionNotifyMatrix
import ToDoCell
import ToDoDocument
import ToDoItem
import TodoAppDelegate
AppHelper.runEventLoop()
|
py | b4065ec6ad9184389f3147380d676a8abf833d24 |
from artists import ArtistList, Artist
from albums import AlbumList, Album
from tracks import TrackList, Track, TrackAudio
from server import Aura
from stats import Stats
|
py | b4065f9da5e71944ee15bb5cfe1676e1f7ac3ad8 | import pytest
from pyhamtools.frequency import freq_to_band
from pyhamtools.consts import LookupConventions as const
class Test_utils_freq_to_band():
def test_hf_frequencies(self):
assert freq_to_band(137) == {"band" : 2190, "mode":const.CW}
assert freq_to_band(1805) == {"band" : 160, "mode":const.CW}
assert freq_to_band(1838) == {"band" : 160, "mode":const.DIGITAL}
assert freq_to_band(1870) == {"band" : 160, "mode":const.LSB}
assert freq_to_band(3500) == {"band" : 80, "mode":const.CW}
assert freq_to_band(3580) == {"band" : 80, "mode":const.DIGITAL}
assert freq_to_band(3799) == {"band" : 80, "mode":const.LSB}
assert freq_to_band(5200) == {"band" : 60, "mode":None}
assert freq_to_band(7000) == {"band" : 40, "mode":const.CW}
assert freq_to_band(7044) == {"band" : 40, "mode":const.DIGITAL}
assert freq_to_band(7139) == {"band" : 40, "mode":const.LSB}
assert freq_to_band(10100) == {"band" : 30, "mode":const.CW}
assert freq_to_band(10141) == {"band" : 30, "mode":const.DIGITAL}
assert freq_to_band(14000) == {"band" : 20, "mode":const.CW}
assert freq_to_band(14070) == {"band" : 20, "mode":const.DIGITAL}
assert freq_to_band(14349) == {"band" : 20, "mode":const.USB}
assert freq_to_band(18068) == {"band" : 17, "mode":const.CW}
assert freq_to_band(18096) == {"band" : 17, "mode":const.DIGITAL}
assert freq_to_band(18250) == {"band" : 17, "mode":const.USB}
assert freq_to_band(21000) == {"band" : 15, "mode":const.CW}
assert freq_to_band(21070) == {"band" : 15, "mode":const.DIGITAL}
assert freq_to_band(21449) == {"band" : 15, "mode":const.USB}
assert freq_to_band(24890) == {"band" : 12, "mode":const.CW}
assert freq_to_band(24916) == {"band" : 12, "mode":const.DIGITAL}
assert freq_to_band(24965) == {"band" : 12, "mode":const.USB}
assert freq_to_band(28000) == {"band" : 10, "mode":const.CW}
assert freq_to_band(28070) == {"band" : 10, "mode":const.DIGITAL}
assert freq_to_band(28500) == {"band" : 10, "mode":const.USB}
assert freq_to_band(50000) == {"band" : 6, "mode":const.CW}
assert freq_to_band(50100) == {"band" : 6, "mode":const.USB}
assert freq_to_band(50500) == {"band" : 6, "mode":const.DIGITAL}
def test_vhf_frequencies(self):
assert freq_to_band(70001) == {"band" : 4, "mode":None}
assert freq_to_band(144000) == {"band" : 2, "mode":const.CW}
assert freq_to_band(144150) == {"band" : 2, "mode":const.USB}
assert freq_to_band(144400) == {"band" : 2, "mode":None}
assert freq_to_band(220000) == {"band" : 1.25, "mode":None}
def test_uhf_frequencies(self):
assert freq_to_band(420000) == {"band" : 0.7, "mode":None}
assert freq_to_band(902000) == {"band" : 0.33, "mode":None}
assert freq_to_band(1200000) == {"band" : 0.23, "mode":None}
def test_shf_frequencies(self):
assert freq_to_band(2390000) == {"band" : 0.13, "mode":None}
assert freq_to_band(3300000) == {"band" : 0.09, "mode":None}
assert freq_to_band(5650000) == {"band" : 0.053, "mode":None}
assert freq_to_band(10000000) == {"band" : 0.03, "mode":None}
assert freq_to_band(24000000) == {"band" : 0.0125, "mode":None}
assert freq_to_band(47000000) == {"band" : 0.0063, "mode":None}
with pytest.raises(KeyError):
freq_to_band(16304) |
py | b406604e621faa9a9bf536b1d8d2c034c3d539af | times = ['internacional', 'vasco', 'atletico-MG', 'palmeiras', 'são paulo'
, 'santos', 'fluminence', 'bahia', 'grêmio', 'Atletico-PR', 'botafogo', 'bragantino'
, 'flamengo', 'corinthias', 'goias', 'fortalesa', 'atletico-GO', 'sport', 'ceara', 'coritiba']
print(f'List of teams in the 2020 Brasileirão: {times}')
print('='*35)
print(f'The first five: {times[:5]}')
print('='*35)
print(f'The last four: {times[16:]}')
print('='*35)
print(f'In alphabetical order: {sorted(times)}')
print('='*35)
print(f'And corinthias is in position {times.index("corinthias")+1}')
|
py | b40662664ee983b08c1c6c38a9ecfb3cc0f6c49c | import pytest_check as check
from shapely.geometry import Polygon
from sectionproperties.pre.geometry import Geometry
import sectionproperties.pre.library.primitive_sections as sections
import sectionproperties.pre.library.steel_sections as steel_sections
from sectionproperties.analysis.section import Section
from sectionproperties.tests.helper_functions import validate_properties
# Setup for angle section
angle = steel_sections.angle_section(d=150, b=90, t=12, r_r=10, r_t=5, n_r=8)
angle.create_mesh(mesh_sizes=2.5)
angle_section = Section(angle)
angle_section.calculate_geometric_properties()
angle_section.calculate_plastic_properties()
angle_section.calculate_warping_properties()
def test_angle_all_properties():
check.almost_equal(angle_section.section_props.area, 2747.059)
check.almost_equal(angle_section.section_props.perimeter, 471.3501)
check.almost_equal(angle_section.section_props.cx, 2.122282e1)
check.almost_equal(angle_section.section_props.cy, 5.098127e1)
check.almost_equal(angle_section.section_props.ixx_g, 1.342632e7)
check.almost_equal(angle_section.section_props.iyy_g, 2.955753e6)
check.almost_equal(angle_section.section_props.ixy_g, 1.086603e6)
check.almost_equal(angle_section.section_props.ixx_c, 6.286470e6)
check.almost_equal(angle_section.section_props.iyy_c, 1.718455e6)
check.almost_equal(angle_section.section_props.ixy_c, -1.885622e6)
check.almost_equal(angle_section.section_props.zxx_plus, 6.348769e4)
check.almost_equal(angle_section.section_props.zxx_minus, 1.233094e5)
check.almost_equal(angle_section.section_props.zyy_plus, 2.498584e04)
check.almost_equal(angle_section.section_props.zyy_minus, 8.097207e4)
check.almost_equal(angle_section.section_props.rx_c, 4.783761e1)
check.almost_equal(angle_section.section_props.ry_c, 2.501124e1)
check.almost_equal(angle_section.section_props.i11_c, 6.964263e6)
check.almost_equal(angle_section.section_props.i22_c, 1.040662e6)
check.almost_equal(angle_section.section_props.phi, -1.602289e2)
check.almost_equal(angle_section.section_props.z11_plus, 9.775662e4)
check.almost_equal(angle_section.section_props.z11_minus, 6.939239e4)
check.almost_equal(angle_section.section_props.z22_plus, 2.796211e4)
check.almost_equal(angle_section.section_props.z22_minus, 2.076613e4)
check.almost_equal(angle_section.section_props.r11_c, 5.035048e1)
check.almost_equal(angle_section.section_props.r22_c, 1.946350e1)
check.almost_equal(angle_section.section_props.sxx, 1.135392e5)
check.almost_equal(
angle_section.section_props.syy, 4.572267e4
) # Altered from 4.572269e4
check.almost_equal(angle_section.section_props.sf_xx_plus, 1.788366)
check.almost_equal(angle_section.section_props.sf_xx_minus, 9.207672e-1)
check.almost_equal(
angle_section.section_props.sf_yy_plus, 1.829943
) # Altered from 1.829944
check.almost_equal(
angle_section.section_props.sf_yy_minus, 5.646721e-1
) # Altered from 5.646723e-1
check.almost_equal(angle_section.section_props.s11, 1.210275e5)
check.almost_equal(angle_section.section_props.s22, 4.376054e4)
check.almost_equal(angle_section.section_props.sf_11_plus, 1.238049)
check.almost_equal(angle_section.section_props.sf_11_minus, 1.744103)
check.almost_equal(angle_section.section_props.sf_22_plus, 1.564994)
check.almost_equal(angle_section.section_props.sf_22_minus, 2.107303)
check.almost_equal(
angle_section.section_props.j, 1.354663e5
) # Altered from 1.354663e5
check.almost_equal(angle_section.section_props.gamma, 162220735.49)
check.almost_equal(angle_section.section_props.A_s11, 8.855951e2)
check.almost_equal(angle_section.section_props.A_s22, 1.460240e3)
check.almost_equal(angle_section.section_props.x11_se, 2.870404e1)
check.almost_equal(angle_section.section_props.y22_se, 3.522141e1)
# Setup custom section
custom_geom_points = [
[-10, 0],
[110, 0],
[100, 10],
[55, 10],
[55, 90],
[100, 90],
[110, 100],
[110, 110],
[-10, 110],
[-10, 100],
[0, 90],
[45, 90],
[45, 10],
[-10, 10],
]
custom_geom = Geometry(Polygon(custom_geom_points))
custom_geom.create_mesh(mesh_sizes=5)
custom_section = Section(custom_geom)
custom_section.calculate_geometric_properties()
custom_section.calculate_plastic_properties()
custom_section.calculate_warping_properties()
def test_custom_section_all_properties():
check.almost_equal(custom_section.section_props.area, 4250)
check.almost_equal(custom_section.section_props.cx, 4.933333e1)
check.almost_equal(custom_section.section_props.cy, 6.501961e1)
check.almost_equal(custom_section.section_props.ixx_g, 2.567250e7)
check.almost_equal(custom_section.section_props.iyy_g, 1.418583e7)
check.almost_equal(custom_section.section_props.ixy_g, 1.379792e7)
check.almost_equal(custom_section.section_props.ixx_c, 7.705415e6)
check.almost_equal(custom_section.section_props.iyy_c, 3.842278e6)
check.almost_equal(custom_section.section_props.ixy_c, 1.654722e5)
check.almost_equal(custom_section.section_props.zxx_plus, 1.713061e5)
check.almost_equal(custom_section.section_props.zxx_minus, 1.185091e5)
check.almost_equal(custom_section.section_props.zyy_plus, 6.333425e4)
check.almost_equal(custom_section.section_props.zyy_minus, 6.475749e4)
check.almost_equal(custom_section.section_props.rx_c, 4.257979e01)
check.almost_equal(custom_section.section_props.ry_c, 3.006768e1)
check.almost_equal(custom_section.section_props.phi, -2.448209)
check.almost_equal(custom_section.section_props.i11_c, 7.712490e6)
check.almost_equal(custom_section.section_props.i22_c, 3.835203e6)
check.almost_equal(custom_section.section_props.z11_plus, 1.622630e5)
check.almost_equal(custom_section.section_props.z11_minus, 1.142680e5)
check.almost_equal(custom_section.section_props.z22_plus, 6.050295e4)
check.almost_equal(custom_section.section_props.z22_minus, 6.266613e4)
check.almost_equal(custom_section.section_props.r11_c, 4.259934e1)
check.almost_equal(custom_section.section_props.r22_c, 3.003998e1)
check.almost_equal(custom_section.section_props.sxx, 1.531971e5)
check.almost_equal(custom_section.section_props.syy, 1.014943e5)
check.almost_equal(
custom_section.get_sf(), (8.942884e-01, 1.292703, 1.602519, 1.567298)
)
check.almost_equal(custom_section.section_props.s11, 1.533463e5)
check.almost_equal(custom_section.section_props.s22, 1.015010e5)
check.almost_equal(custom_section.section_props.sf_11_plus, 9.450478e-1)
check.almost_equal(custom_section.section_props.sf_11_minus, 1.341988)
check.almost_equal(custom_section.section_props.sf_22_plus, 1.677621)
check.almost_equal(custom_section.section_props.sf_22_minus, 1.619711)
check.almost_equal(custom_section.section_props.j, 3.477399e5)
check.almost_equal(custom_section.section_props.gamma, 7.532929e9)
check.almost_equal(custom_section.section_props.A_s11, 2.945692e3)
check.almost_equal(custom_section.section_props.A_s22, 9.564143e2)
check.almost_equal(custom_section.section_props.x11_se, 1.916270)
check.almost_equal(custom_section.section_props.y22_se, 3.017570)
|
py | b406628491324e25fb77400961182f153fcfdacb | '''
This example demonstrates the use of Convolution1D for text classification.
Gets to 0.89 test accuracy after 2 epochs.
90s/epoch on Intel i5 2.4GHz CPU.
10s/epoch on Tesla K40 GPU.
'''
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
# Fix cuDNN initialization
# https://github.com/tensorflow/tensorflow/issues/24828
try:
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
except:
pass
# set parameters:
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
epochs = 2
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
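# ---- Added illustration (not part of the original Keras example) ----
# Minimal follow-up sketch: score the fitted model on the held-out IMDB test
# split, reusing the x_test/y_test arrays and batch_size defined above.
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test loss:', score)
print('Test accuracy:', acc)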
|
py | b4066298180b727815ba568e10b58926f0ff5435 | # This script outputs relevant system environment info
# Run it with `python collect_env.py`.
from __future__ import absolute_import, division, print_function, unicode_literals
import re
import subprocess
import sys
import time
import datetime
import os
from collections import namedtuple
try:
import torch
TORCH_AVAILABLE = True
except (ImportError, NameError, AttributeError):
TORCH_AVAILABLE = False
PY3 = sys.version_info >= (3, 0)
# System Environment Information
SystemEnv = namedtuple('SystemEnv', [
'torch_version',
'is_debug_build',
'cuda_compiled_version',
'gcc_version',
'cmake_version',
'os',
'python_version',
'is_cuda_available',
'cuda_runtime_version',
'nvidia_driver_version',
'nvidia_gpu_models',
'cudnn_version',
'pip_version', # 'pip' or 'pip3'
'pip_packages',
'conda_packages',
])
def run(command):
"""Returns (return-code, stdout, stderr)"""
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
output, err = p.communicate()
rc = p.returncode
if PY3:
output = output.decode("utf-8")
err = err.decode("utf-8")
return rc, output.strip(), err.strip()
def run_and_read_all(run_lambda, command):
"""Runs command using run_lambda; reads and returns entire output if rc is 0"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
return out
def run_and_parse_first_match(run_lambda, command, regex):
"""Runs command using run_lambda, returns the first regex match if it exists"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
match = re.search(regex, out)
if match is None:
return None
return match.group(1)
def get_conda_packages(run_lambda):
if get_platform() == 'win32':
grep_cmd = r'findstr /R "torch soumith mkl magma"'
else:
grep_cmd = r'grep "torch\|soumith\|mkl\|magma"'
out = run_and_read_all(run_lambda, 'conda list | ' + grep_cmd)
if out is None:
return out
# Comment starting at beginning of line
comment_regex = re.compile(r'^#.*\n')
return re.sub(comment_regex, '', out)
def get_gcc_version(run_lambda):
return run_and_parse_first_match(run_lambda, 'gcc --version', r'gcc (.*)')
def get_cmake_version(run_lambda):
return run_and_parse_first_match(run_lambda, 'cmake --version', r'cmake (.*)')
def get_nvidia_driver_version(run_lambda):
smi = get_nvidia_smi()
return run_and_parse_first_match(run_lambda, smi, r'Driver Version: (.*?) ')
def get_gpu_info(run_lambda):
smi = get_nvidia_smi()
uuid_regex = re.compile(r' \(UUID: .+?\)')
rc, out, _ = run_lambda(smi + ' -L')
if rc != 0:
return None
# Anonymize GPUs by removing their UUID
return re.sub(uuid_regex, '', out)
def get_running_cuda_version(run_lambda):
return run_and_parse_first_match(run_lambda, 'nvcc --version', r'V(.*)$')
def get_cudnn_version(run_lambda):
"""This will return a list of libcudnn.so; it's hard to tell which one is being used"""
if get_platform() == 'win32':
cudnn_cmd = 'where /R "%CUDA_PATH%\\bin" cudnn*.dll'
else:
cudnn_cmd = 'find /usr/local /usr/lib -type f -name "libcudnn*" 2> /dev/null'
rc, out, _ = run_lambda(cudnn_cmd)
# find will return 1 if there are permission errors or if not found
if len(out) == 0:
return None
if rc != 1 and rc != 0:
return None
# Alphabetize the result because the order is non-deterministic otherwise
result = '\n'.join(sorted(out.split('\n')))
return 'Probably one of the following:\n{}'.format(result)
def get_nvidia_smi():
smi = 'nvidia-smi'
if get_platform() == 'win32':
smi = '"C:\\Program Files\\NVIDIA Corporation\\NVSMI\\%s"' % smi
return smi
def get_platform():
if sys.platform.startswith('linux'):
return 'linux'
elif sys.platform.startswith('win32'):
return 'win32'
elif sys.platform.startswith('cygwin'):
return 'cygwin'
elif sys.platform.startswith('darwin'):
return 'darwin'
else:
return sys.platform
def get_mac_version(run_lambda):
return run_and_parse_first_match(run_lambda, 'sw_vers -productVersion', r'(.*)')
def get_windows_version(run_lambda):
return run_and_read_all(run_lambda, 'wmic os get Caption | findstr /v Caption')
def get_lsb_version(run_lambda):
return run_and_parse_first_match(run_lambda, 'lsb_release -a', r'Description:\t(.*)')
def check_release_file(run_lambda):
return run_and_parse_first_match(run_lambda, 'cat /etc/*-release',
r'PRETTY_NAME="(.*)"')
def get_os(run_lambda):
platform = get_platform()
if platform == 'win32' or platform == 'cygwin':
return get_windows_version(run_lambda)
if platform == 'darwin':
version = get_mac_version(run_lambda)
if version is None:
return None
return 'Mac OSX {}'.format(version)
if platform == 'linux':
# Ubuntu/Debian based
desc = get_lsb_version(run_lambda)
if desc is not None:
return desc
# Try reading /etc/*-release
desc = check_release_file(run_lambda)
if desc is not None:
return desc
return platform
# Unknown platform
return platform
def get_pip_packages(run_lambda):
# People generally have `pip` as `pip` or `pip3`
def run_with_pip(pip):
if get_platform() == 'win32':
grep_cmd = r'findstr /R "numpy torch"'
else:
grep_cmd = r'grep "torch\|numpy"'
return run_and_read_all(run_lambda, pip + ' list --format=legacy | ' + grep_cmd)
if not PY3:
return 'pip', run_with_pip('pip')
# Try to figure out if the user is running pip or pip3.
out2 = run_with_pip('pip')
out3 = run_with_pip('pip3')
num_pips = len([x for x in [out2, out3] if x is not None])
if num_pips == 0:
return 'pip', out2
if num_pips == 1:
if out2 is not None:
return 'pip', out2
return 'pip3', out3
# num_pips is 2. Return pip3 by default b/c that most likely
# is the one associated with Python 3
return 'pip3', out3
def get_env_info():
run_lambda = run
pip_version, pip_list_output = get_pip_packages(run_lambda)
if TORCH_AVAILABLE:
version_str = torch.__version__
debug_mode_str = torch.version.debug
cuda_available_str = torch.cuda.is_available()
cuda_version_str = torch.version.cuda
else:
version_str = debug_mode_str = cuda_available_str = cuda_version_str = 'N/A'
return SystemEnv(
torch_version=version_str,
is_debug_build=debug_mode_str,
python_version='{}.{}'.format(sys.version_info[0], sys.version_info[1]),
is_cuda_available=cuda_available_str,
cuda_compiled_version=cuda_version_str,
cuda_runtime_version=get_running_cuda_version(run_lambda),
nvidia_gpu_models=get_gpu_info(run_lambda),
nvidia_driver_version=get_nvidia_driver_version(run_lambda),
cudnn_version=get_cudnn_version(run_lambda),
pip_version=pip_version,
pip_packages=pip_list_output,
conda_packages=get_conda_packages(run_lambda),
os=get_os(run_lambda),
gcc_version=get_gcc_version(run_lambda),
cmake_version=get_cmake_version(run_lambda),
)
env_info_fmt = """
PyTorch version: {torch_version}
Is debug build: {is_debug_build}
CUDA used to build PyTorch: {cuda_compiled_version}
OS: {os}
GCC version: {gcc_version}
CMake version: {cmake_version}
Python version: {python_version}
Is CUDA available: {is_cuda_available}
CUDA runtime version: {cuda_runtime_version}
GPU models and configuration: {nvidia_gpu_models}
Nvidia driver version: {nvidia_driver_version}
cuDNN version: {cudnn_version}
Versions of relevant libraries:
{pip_packages}
{conda_packages}
""".strip()
def pretty_str(envinfo):
def replace_nones(dct, replacement='Could not collect'):
for key in dct.keys():
if dct[key] is not None:
continue
dct[key] = replacement
return dct
def replace_bools(dct, true='Yes', false='No'):
for key in dct.keys():
if dct[key] is True:
dct[key] = true
elif dct[key] is False:
dct[key] = false
return dct
def prepend(text, tag='[prepend]'):
lines = text.split('\n')
updated_lines = [tag + line for line in lines]
return '\n'.join(updated_lines)
def replace_if_empty(text, replacement='No relevant packages'):
if text is not None and len(text) == 0:
return replacement
return text
def maybe_start_on_next_line(string):
# If `string` is multiline, prepend a \n to it.
if string is not None and len(string.split('\n')) > 1:
return '\n{}\n'.format(string)
return string
mutable_dict = envinfo._asdict()
# If nvidia_gpu_models is multiline, start on the next line
mutable_dict['nvidia_gpu_models'] = \
maybe_start_on_next_line(envinfo.nvidia_gpu_models)
# If the machine doesn't have CUDA, report some fields as 'No CUDA'
dynamic_cuda_fields = [
'cuda_runtime_version',
'nvidia_gpu_models',
'nvidia_driver_version',
]
all_cuda_fields = dynamic_cuda_fields + ['cudnn_version']
all_dynamic_cuda_fields_missing = all(
mutable_dict[field] is None for field in dynamic_cuda_fields)
if TORCH_AVAILABLE and not torch.cuda.is_available() and all_dynamic_cuda_fields_missing:
for field in all_cuda_fields:
mutable_dict[field] = 'No CUDA'
if envinfo.cuda_compiled_version is None:
mutable_dict['cuda_compiled_version'] = 'None'
# Replace True with Yes, False with No
mutable_dict = replace_bools(mutable_dict)
# Replace all None objects with 'Could not collect'
mutable_dict = replace_nones(mutable_dict)
# If either of these are '', replace with 'No relevant packages'
mutable_dict['pip_packages'] = replace_if_empty(mutable_dict['pip_packages'])
mutable_dict['conda_packages'] = replace_if_empty(mutable_dict['conda_packages'])
# Tag conda and pip packages with a prefix
# If they were previously None, they'll show up as ie '[conda] Could not collect'
if mutable_dict['pip_packages']:
mutable_dict['pip_packages'] = prepend(mutable_dict['pip_packages'],
'[{}] '.format(envinfo.pip_version))
if mutable_dict['conda_packages']:
mutable_dict['conda_packages'] = prepend(mutable_dict['conda_packages'],
'[conda] ')
return env_info_fmt.format(**mutable_dict)
def get_pretty_env_info():
return pretty_str(get_env_info())
def main():
print("Collecting environment information...")
output = get_pretty_env_info()
print(output)
if __name__ == '__main__':
main()
|
py | b40662e87f3acaeb1da69a3530625cb57e6f882c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 20:43:59 2020
@author: darp_lord
"""
import os
import numpy as np
from newsapi import NewsApiClient
import gensim
from gensim.corpora import Dictionary
from gensim.models import LdaMulticore, TfidfModel
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer
from gensim.test.utils import datapath
MODEL_DIR="./models/"
lemma = WordNetLemmatizer()
def checkdir(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
def lemma_pp(text):
result = []
for token in gensim.utils.simple_preprocess(text):
if token not in STOPWORDS:
result.append(lemma.lemmatize(token))
return result
def updateLDA():
api_file="./newsapi.key"
categories=['business', 'entertainment', 'general', 'health', 'science', 'sports', 'technology']
with open(api_file,"r") as apikey:
newsapi=NewsApiClient(api_key=apikey.read().strip())
headlines={cat:newsapi.get_top_headlines(category=cat, language='en', country='in') for cat in categories}
pp_docs=[]
for category in headlines:
for article in headlines[category]['articles']:
#print(lemma_pp(article['title']))
pp_docs.append(lemma_pp(article['title']))
if os.path.exists(MODEL_DIR+"corpus_dict.model"):
corp_d=Dictionary.load(MODEL_DIR+"corpus_dict.model")
corp_d.add_documents(pp_docs)
else:
corp_d = Dictionary(pp_docs)
corp_d.filter_extremes(no_below=2, no_above=0.5)
dtm=[corp_d.doc2bow(doc) for doc in pp_docs]
tfidf=TfidfModel(dtm)
corp_tfidf=tfidf[dtm]
lda = LdaMulticore(corp_tfidf, num_topics=5, id2word=corp_d, passes=60, workers=3)
print(lda.print_topics(num_topics=5, num_words=5))
checkdir(MODEL_DIR)
corp_d.save(MODEL_DIR+"corpus_dict.model")
#corp_tfidf.save(MODEL_DIR+"corpus_tfidf.model")
lda.save(MODEL_DIR+"lda.model")
def getLDA(topics):
corp_d=Dictionary.load(MODEL_DIR+"corpus_dict.model")
lda=LdaMulticore.load(MODEL_DIR+"lda.model")
pp_docs=[]
for topic in topics:
pp_docs.append(lemma_pp(topic))
dtm=[corp_d.doc2bow(doc) for doc in pp_docs]
tfidf=TfidfModel(dtm)
corp_tfidf=tfidf[dtm]
return list(lda[tfidf[dtm]])
if __name__=="__main__":
updateLDA()
print(getLDA(["Prime Minister Ardern says New Zealand has won \"battle\" against community spread of coronavirus - CBS News",
"LG Velvet specs revealed: 48-megapixel camera, optional dual screen",
"A big asteroid will fly by Earth Wednesday, but don't panic. It won't hit us"]))
|
py | b40663491d933fe4b315abde28db1c0358b0449e | import sys
import numpy as np
#import cv2
#sys.path.append('/home/ibu6429/shared_file_system/code/math_functions/')
try:
from algebra import Algebra
except ImportError:
from localseg.data_generators.algebra import Algebra
def extractEquirectangular(wrap,source_image,result_image,euler_angles):
extractEquirectangular_quick(wrap,source_image,result_image,Algebra.rotation_matrix(euler_angles))
def extractEquirectangular_quick(wrap, source_image, result_image,R):
# First dimension: Yaw
# Second dimension: Roll
# Third dimension: Pitch
Algebra.test_polar_to_polar()
min_theta = np.pi * 4
max_theta = -np.pi * 4;
min_psi = np.pi * 4;
max_psi = -np.pi * 4;
result_image[:,:,0]=255
width=result_image.shape[1]
height=result_image.shape[0]
row,col = np.mgrid[0:height,0:width]
polar_point=np.zeros((height,width,3))
polar_point [:,:,0]=1.0
polar_point[:,:,1] = (row / (1.0 * height))*np.pi
polar_point[:,:,2]= ((col - width // 2) / (0.5 * width)) *np.pi
max_1=np.max(np.max(polar_point[:,:,1]))
min_1=np.min(np.min(polar_point[:,:,1]))
abs_max_2=np.max(np.max(np.abs(polar_point[:,:,2])))
#print('max 1 min 1 absmax2 ' +str(max_1)+ ' '+str(min_1)+' ' +str(abs_max_2))
#assert(max_1 <= np.pi) and(min_1>=0)and(abs_max_2<=np.pi) #disabled for speed
plane_point = Algebra.polar_to_cartesian_array(polar_point)
# assert( np.max(np.max(np.abs(Algebra.magnitude_array(plane_point)-polar_point[:,:,0]))) < 0.0001) #disabled for speed
plane_rot=Algebra.rotate_array(R,plane_point)
#assert(np.max(np.max(np.abs(Algebra.magnitude_array(plane_point) - Algebra.magnitude_array(plane_rot)))) < 0.0001) #disbled for speed
eq_row,eq_col=Algebra.cartesian_to_polar_quantised_array(wrap,plane_rot,width,height)
#assert ((np.min(np.min(eq_row))>=0) and (np.max(np.max(eq_row))<height)and (np.min(np.min(eq_col))>=0) and (np.max(np.max(eq_row))<width)) #disabled for speed
result_image[row, col, :] = source_image[ eq_row ,eq_col, :]
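# ---- Added usage sketch (not part of the original module; array shapes and angle values are assumptions) ----
# result = np.zeros_like(source)                    # equirectangular output buffer
# R = Algebra.rotation_matrix([yaw, roll, pitch])   # Euler angles in radians, as used above
# extractEquirectangular_quick(True, source, result, R)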
|
py | b40664533c1979deb9587d820822a0a76f7dcfe2 | from .equity_pricing import USEquityPricing
from .dataset import DataSet, Column, BoundColumn
__all__ = [
'BoundColumn',
'Column',
'DataSet',
'USEquityPricing',
]
|
py | b406648d63d1023982c6f34c9856c1993ad32d01 | """Download variation data from dbSNP and install within directory structure.
Uses Broad's GATK resource bundles:
http://www.broadinstitute.org/gsa/wiki/index.php/GATK_resource_bundle
Retrieves dbSNP plus training data for variant recalibration:
- dbsnp_132.hg19.vcf.gz
- hapmap_3.3.hg19.sites.vcf
- 1000G_omni2.5.hg19.sites.vcf
- Mills_and_1000G_gold_standard.indels.hg19.sites.vcf
For MuTect and cancer calling:
- cosmic
For structural variant calling and SNP/indel filtering
- low complexity regions
- centromere and telomere regions
"""
import os
from fabric.api import env
from fabric.contrib.files import cd
from cloudbio.custom import shared
def download_dbsnp(genomes, bundle_version, dbsnp_version):
"""Download and install dbSNP variation data for supplied genomes.
"""
folder_name = "variation"
genome_dir = os.path.join(env.data_files, "genomes")
for (orgname, gid, manager) in ((o, g, m) for (o, g, m) in genomes
if m.config.get("dbsnp", False)):
vrn_dir = os.path.join(genome_dir, orgname, gid, folder_name)
if not env.safe_exists(vrn_dir):
env.safe_run('mkdir -p %s' % vrn_dir)
with cd(vrn_dir):
if gid in ["GRCh37", "hg19"]:
_dbsnp_human(env, gid, manager, bundle_version, dbsnp_version)
elif gid in ["mm10", "canFam3"]:
_dbsnp_custom(env, gid)
elif manager.data_source == "Ensembl":
_ensembl_vcf(env, gid, manager)
def _ensembl_vcf(env, gid, manager):
"""Fetch ensemble vcf file (available from release 71) and do tabix indexing
"""
fname = "%s.vcf.gz" % (manager._organism)
download_url = manager._base_url
section = "variation/"
if manager._section != "standard":
section = ""
fname = fname.lower()
download_url += "release-%s/%svcf/%s/%s" % (manager._release_number,
section, manager._organism.lower(), fname)
if not env.safe_exists(fname):
shared._remote_fetch(env, download_url)
env.safe_run("tabix -f -p vcf %s" % fname)
def _dbsnp_custom(env, gid):
"""Retrieve resources for dbsnp builds from custom S3 biodata bucket.
"""
remote_dir = "https://s3.amazonaws.com/biodata/variants/"
files = {"mm10": ["mm10-dbSNP-2013-09-12.vcf.gz"],
"canFam3": ["canFam3-dbSNP-2014-05-10.vcf.gz"]}
for f in files[gid]:
for ext in ["", ".tbi"]:
fname = f + ext
if not env.safe_exists(fname):
shared._remote_fetch(env, "%s%s" % (remote_dir, fname))
def _dbsnp_human(env, gid, manager, bundle_version, dbsnp_version):
"""Retrieve resources for human variant analysis from Broad resource bundles.
"""
to_download = ["dbsnp_{ver}".format(ver=dbsnp_version),
"hapmap_3.3",
"1000G_omni2.5",
"1000G_phase1.snps.high_confidence",
"Mills_and_1000G_gold_standard.indels"]
for dl_name in to_download:
for ext in [""]:
_download_broad_bundle(manager.dl_name, bundle_version, dl_name, ext)
_download_cosmic(gid)
_download_repeats(gid)
_download_dbnsfp(env, gid, manager.config)
_download_ancestral(env, gid, manager.config)
# XXX Wait to get this by default until it is used more widely
#_download_background_vcf(gid)
def _download_broad_bundle(gid, bundle_version, name, ext):
broad_fname = "{name}.{gid}.vcf{ext}".format(gid=gid, name=name, ext=ext)
fname = broad_fname.replace(".{0}".format(gid), "").replace(".sites", "") + ".gz"
base_url = "ftp://gsapubftp-anonymous:@ftp.broadinstitute.org/bundle/" + \
"{bundle}/{gid}/{fname}.gz".format(
bundle=bundle_version, fname=broad_fname, gid=gid)
# compress and prepare existing uncompressed versions
if env.safe_exists(fname.replace(".vcf.gz", ".vcf")):
env.safe_run("bgzip %s" % fname.replace(".vcf.gz", ".vcf"))
env.safe_run("tabix -f -p vcf %s" % fname)
# otherwise, download and bgzip and tabix index
if not env.safe_exists(fname):
out_file = shared._remote_fetch(env, base_url, allow_fail=True)
if out_file:
env.safe_run("gunzip -c %s | bgzip -c > %s" % (out_file, fname))
env.safe_run("tabix -f -p vcf %s" % fname)
env.safe_run("rm -f %s" % out_file)
else:
env.logger.warn("dbSNP resources not available for %s" % gid)
# clean up old files
for ext in [".vcf", ".vcf.idx"]:
if env.safe_exists(fname.replace(".vcf.gz", ext)):
env.safe_run("rm -f %s" % (fname.replace(".vcf.gz", ext)))
return fname
def _download_cosmic(gid):
"""Prepared versions of COSMIC, pre-sorted and indexed.
utils/prepare_cosmic.py handles the work of creating the VCFs from standard
COSMIC resources.
"""
base_url = "https://s3.amazonaws.com/biodata/variants"
version = "v68"
supported = ["hg19", "GRCh37"]
if gid in supported:
url = "%s/cosmic-%s-%s.vcf.gz" % (base_url, version, gid)
fname = os.path.basename(url)
if not env.safe_exists(fname):
shared._remote_fetch(env, url)
if not env.safe_exists(fname + ".tbi"):
shared._remote_fetch(env, url + ".tbi")
def _download_dbnsfp(env, gid, gconfig):
"""Download and prepare dbNSFP functional prediction resources if configured.
Feeds into VEP for annotating VCF files:
https://sites.google.com/site/jpopgen/dbNSFP
https://github.com/ensembl-variation/VEP_plugins/blob/master/dbNSFP.pm
"""
version = "2.5"
url = "http://dbnsfp.houstonbioinformatics.org/dbNSFPzip/dbNSFPv%s.zip" % version
if gconfig.get("dbnsfp"):
outfile = "dbNSFP_v%s.gz" % (version)
if gid == "GRCh37": # download and prepare bgzipped output file
if not env.safe_exists(outfile):
zipfile = shared._remote_fetch(env, url, samedir=True)
outdir = "dbNSFPv%s" % version
env.safe_run("mkdir -p %s" % outdir)
env.safe_run("unzip %s -d %s" % (zipfile, outdir))
env.safe_run("cat %s/dbNSFP*_variant.chr* | bgzip -c > %s" % (outdir, outfile))
env.safe_run("rm -f %s/* && rmdir %s" % (outdir, outdir))
env.safe_run("rm -f %s" % (zipfile))
if not env.safe_exists(outfile + ".tbi"):
env.safe_run("tabix -s 1 -b 2 -e 2 -c '#' %s" % outfile)
elif gid == "hg19": # symlink to GRCh37 download
if not env.safe_exists(outfile):
env.safe_run("ln -sf ../../GRCh37/variation/%s %s" % (outfile, outfile))
if not env.safe_exists(outfile + ".tbi"):
env.safe_run("ln -sf ../../GRCh37/variation/%s.tbi %s.tbi" % (outfile, outfile))
def _download_ancestral(env, gid, gconfig):
"""Download ancestral genome sequence for loss of function evaluation.
Used by LOFTEE VEP plugin: https://github.com/konradjk/loftee
"""
base_url = "http://www.broadinstitute.org/~konradk/loftee/human_ancestor.fa.rz"
if gid == "GRCh37":
for ext in ["", ".fai"]:
outfile = os.path.basename(base_url) + ext
if not env.safe_exists(outfile):
shared._remote_fetch(env, base_url + ext, samedir=True)
elif gid == "hg19": # symlink to GRCh37 download
for ext in ["", ".fai"]:
outfile = os.path.basename(base_url) + ext
if not env.safe_exists(outfile):
env.safe_run("ln -sf ../../GRCh37/variation/%s %s" % (outfile, outfile))
def _download_background_vcf(gid):
"""Download background file of variant to use in calling.
"""
base_url = "https://s3.amazonaws.com/biodata/variants"
base_name = "background-diversity-1000g.vcf"
if gid in ["GRCh37"] and not env.safe_exists("{0}.gz".format(base_name)):
for ext in ["gz", "gz.tbi"]:
shared._remote_fetch(env, "{0}/{1}.{2}".format(base_url, base_name, ext))
def _download_repeats(gid):
_download_sv_repeats(gid)
_download_lcrs(gid)
def _download_sv_repeats(gid):
"""Retrieve telomere and centromere exclusion regions for structural variant calling.
From Delly: https://github.com/tobiasrausch/delly
"""
mere_url = "https://raw.githubusercontent.com/chapmanb/delly/master/human.hg19.excl.tsv"
out_file = "sv_repeat_telomere_centromere.bed"
if not env.safe_exists(out_file):
def _select_by_gid(env, orig_file):
if gid == "hg19":
env.safe_run("grep ^chr %s > %s" % (orig_file, out_file))
else:
assert gid == "GRCh37"
env.safe_run("grep -v ^chr %s > %s" % (orig_file, out_file))
return out_file
shared._remote_fetch(env, mere_url, fix_fn=_select_by_gid)
def _download_lcrs(gid):
"""Retrieve low complexity regions from Heng Li's variant analysis paper.
"""
lcr_url = "https://github.com/lh3/varcmp/raw/master/scripts/LCR-hs37d5.bed.gz"
out_file = "LCR.bed.gz"
if not env.safe_exists(out_file):
def _fix_chrom_names(env, orig_file):
if gid == "hg19":
convert_cmd = "| grep -v ^GL | grep -v ^NC | grep -v ^hs | sed 's/^/chr/'"
else:
assert gid == "GRCh37"
convert_cmd = ""
env.safe_run("zcat %s %s | bgzip -c > %s" % (orig_file, convert_cmd, out_file))
return out_file
shared._remote_fetch(env, lcr_url, fix_fn=_fix_chrom_names)
env.safe_run("tabix -p vcf -f %s" % out_file)
|
py | b40664f1ccfd800ac6ada6ca92a5654634e66251 | # Recurrent Neural Networks (RNN)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 4 19:32:12 2020
@author: juangabriel
"""
# Part 1 - Data preprocessing
# Import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Import the training dataset
dataset_train = pd.read_csv("datasets/Part 3 - Recurrent Neural Networks (RNN)/Google_Stock_Price_Train.csv")
training_set = dataset_train.iloc[:, 1:2].values
# Feature scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
# Create a data structure with 60 timesteps and 1 output
X_train = []
y_train = []
for i in range(60, 1258):
X_train.append(training_set_scaled[i-60:i, 0])
y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
# Reshape the data
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
# Part 2 - Building the RNN
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
# Initialize the model
regressor = Sequential()
# Add the first LSTM layer and Dropout regularization
regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1) ))
regressor.add(Dropout(0.2))
# Add the second LSTM layer and Dropout regularization
regressor.add(LSTM(units = 50, return_sequences = True ))
regressor.add(Dropout(0.2))
# Add the third LSTM layer and Dropout regularization
regressor.add(LSTM(units = 50, return_sequences = True ))
regressor.add(Dropout(0.2))
# Add the fourth LSTM layer and Dropout regularization
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))
# Add the output layer
regressor.add(Dense(units = 1))
# Compile the RNN
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
# Fit the RNN to the training set
regressor.fit(X_train, y_train, epochs = 100, batch_size = 32)
# Part 3 - Making predictions and visualizing the results
# Get the real stock prices for January 2017
dataset_test = pd.read_csv('datasets/Part 3 - Recurrent Neural Networks (RNN)/Google_Stock_Price_Test.csv')
real_stock_price = dataset_test.iloc[:, 1:2].values
# Get the RNN's stock price prediction for January 2017
dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
X_test = []
for i in range(60, 80):
X_test.append(inputs[i-60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
# Visualize the results
plt.plot(real_stock_price, color = 'red', label = 'Real Google stock price')
plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google stock price')
plt.title("RNN prediction of the Google stock price")
plt.xlabel("Date")
plt.ylabel("Google stock price")
plt.legend()
plt.show()
# Root mean squared error
import math
from sklearn.metrics import mean_squared_error
rmse = math.sqrt(mean_squared_error(real_stock_price, predicted_stock_price))
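# Added for illustration (not in the original script): report the error metric.
print("Test RMSE:", rmse)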
|
py | b4066549534d45bb55816c2cd94e9b5e615149e0 | '''
Source : https://leetcode.com/problems/x-of-a-kind-in-a-deck-of-cards/
Author : Yuan Wang
Date : 2019-01-09
/**********************************************************************************
*In a deck of cards, each card has an integer written on it.
*
*Return true if and only if you can choose X >= 2 such that it is possible to split
*the entire deck into one or more groups of cards, where each group has exactly X
*cards and all the cards in each group have the same integer.
Example 1:
Input: [1,2,3,4,4,3,2,1]
Output: true
Explanation: Possible partition [1,1],[2,2],[3,3],[4,4]
Example 2:
Input: [1,1,1,2,2,2,3,3]
Output: false
Explanation: No possible partition
**********************************************************************************/
'''
#Self solution, Brute Force, Time complexity:O(N^2loglogN)
from collections import Counter
def hasGroupsSizeX(deck):
"""
:type deck: List[int]
:rtype: bool
"""
if len(deck) < 2:
return False
d=Counter(deck)
test=max(d.values())
for i in range(2,test+1):
result=sum([d[count] % i for count in d])
if result == 0:
return True
return False
#Other solution, using GCD, time complexity:O(Nlog^2N)
def hasGroupsSizeXB(deck):
"""
:type deck: List[int]
:rtype: bool
"""
from math import gcd
from functools import reduce
vals = Counter(deck).values()
return reduce(gcd, vals) >= 2
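# Added note (not in the original file): the GCD approach works because a valid
# group size X must divide every count; e.g. for [1,2,3,4,4,3,2,1] every value
# appears twice, so gcd of the counts is 2 >= 2 and the function returns True.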
import unittest
class Test(unittest.TestCase):
def setUp(self):
self.A = [1,2,3,4,4,3,2,1]
self.B = [1,1,1,2,2,2,3,3]
def testA(self):
self.assertEqual(hasGroupsSizeX(self.A),True)
def testB(self):
self.assertEqual(hasGroupsSizeXB(self.B),False)
if __name__ == '__main__':
unittest.main() |
py | b4066729ab557f7320c61885f31c90c43544137b | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
General utils
"""
import contextlib
import glob
import logging
import math
import os
import platform
import random
import re
import shutil
import signal
import time
import urllib
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from zipfile import ZipFile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from yolov5.utils.downloads import gsutil_getsize
from yolov5.utils.metrics import box_iou, fitness
# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320,
formatter={'float_kind': '{:11.5g}'.format
}) # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(
0
) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(),
8)) # NumExpr max threads
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
def set_logging(name=None, verbose=True):
# Sets level and returns logger
rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings
logging.basicConfig(format="%(message)s",
level=logging.INFO if
(verbose and rank in (-1, 0)) else logging.WARNING)
return logging.getLogger(name)
LOGGER = set_logging(
__name__) # define globally (used in train.py, val.py, detect.py, etc.)
class Profile(contextlib.ContextDecorator):
# Usage: @Profile() decorator or 'with Profile():' context manager
def __enter__(self):
self.start = time.time()
def __exit__(self, type, value, traceback):
print(f'Profile results: {time.time() - self.start:.5f}s')
class Timeout(contextlib.ContextDecorator):
# Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager
def __init__(self,
seconds,
*,
timeout_msg='',
suppress_timeout_errors=True):
self.seconds = int(seconds)
self.timeout_message = timeout_msg
self.suppress = bool(suppress_timeout_errors)
def _timeout_handler(self, signum, frame):
raise TimeoutError(self.timeout_message)
def __enter__(self):
signal.signal(signal.SIGALRM,
self._timeout_handler) # Set handler for SIGALRM
signal.alarm(self.seconds) # start countdown for SIGALRM to be raised
def __exit__(self, exc_type, exc_val, exc_tb):
signal.alarm(0) # Cancel SIGALRM if it's scheduled
if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError
return True
class WorkingDirectory(contextlib.ContextDecorator):
# Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager
def __init__(self, new_dir):
self.dir = new_dir # new dir
self.cwd = Path.cwd().resolve() # current dir
def __enter__(self):
os.chdir(self.dir)
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.cwd)
def try_except(func):
# try-except function. Usage: @try_except decorator
def handler(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
print(e)
return handler
def methods(instance):
# Get class/instance methods
return [
f for f in dir(instance)
if callable(getattr(instance, f)) and not f.startswith("__")
]
def print_args(name, opt):
# Print argparser arguments
LOGGER.info(
colorstr(f'{name}: ') + ', '.join(f'{k}={v}'
for k, v in vars(opt).items()))
def init_seeds(seed=0):
# Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
# cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible
import torch.backends.cudnn as cudnn
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
cudnn.benchmark, cudnn.deterministic = (False,
True) if seed == 0 else (True,
False)
def intersect_dicts(da, db, exclude=()):
# Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
return {
k: v
for k, v in da.items()
if k in db and not any(x in k
for x in exclude) and v.shape == db[k].shape
}
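# Hedged usage sketch (illustrative, not part of the original file; the helper
# name is hypothetical): a common pattern is to filter a pretrained state_dict
# against the current model before loading it, assuming `pretrained_state` and
# `model` objects exist.
def _example_intersect_dicts(pretrained_state, model):
    # keep only entries whose key exists in the model and whose shape matches
    csd = intersect_dicts(pretrained_state, model.state_dict(), exclude=('anchor',))
    model.load_state_dict(csd, strict=False)  # load the matching subset only
    return csd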
def get_latest_run(search_dir='.'):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ''
def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
# Return path of user configuration directory. Prefer environment variable if exists. Make dir if required.
env = os.getenv(env_var)
if env:
path = Path(env) # use environment variable
else:
cfg = {
'Windows': 'AppData/Roaming',
'Linux': '.config',
'Darwin': 'Library/Application Support'
} # 3 OS dirs
path = Path.home() / cfg.get(platform.system(),
'') # OS-specific config dir
path = (path if is_writeable(path) else Path('/tmp')
) / dir # GCP and AWS lambda fix, only /tmp is writeable
path.mkdir(exist_ok=True) # make if required
return path
def is_writeable(dir, test=False):
# Return True if directory has write permissions, test opening a file with write permissions if test=True
if test: # method 1
file = Path(dir) / 'tmp.txt'
try:
with open(file, 'w'): # open file with write permissions
pass
file.unlink() # remove file
return True
except OSError:
return False
else: # method 2
return os.access(dir, os.R_OK) # possible issues on Windows
def is_docker():
# Is environment a Docker container?
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
def is_colab():
# Is environment a Google Colab instance?
try:
import google.colab
return True
except ImportError:
return False
def is_pip():
# Is file in a pip package?
return 'site-packages' in Path(__file__).resolve().parts
def is_ascii(s=''):
# Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
s = str(s) # convert list, tuple, None, etc. to str
return len(s.encode().decode('ascii', 'ignore')) == len(s)
def is_chinese(s='人工智能'):
# Is string composed of any Chinese characters?
return re.search('[\u4e00-\u9fff]', s)
def emojis(str=''):
# Return platform-dependent emoji-safe version of string
return str.encode().decode(
'ascii', 'ignore') if platform.system() == 'Windows' else str
def file_size(path):
# Return file/dir size (MB)
path = Path(path)
if path.is_file():
return path.stat().st_size / 1E6
elif path.is_dir():
return sum(f.stat().st_size
for f in path.glob('**/*') if f.is_file()) / 1E6
else:
return 0.0
def check_online():
# Check internet connectivity
import socket
try:
socket.create_connection(("1.1.1.1", 443),
5) # check host accessibility
return True
except OSError:
return False
@try_except
@WorkingDirectory(ROOT)
def check_git_status():
# Recommend 'git pull' if code is out of date
msg = ', for updates see https://github.com/ultralytics/yolov5'
print(colorstr('github: '), end='')
assert Path('.git').exists(), 'skipping check (not a git repository)' + msg
assert not is_docker(), 'skipping check (Docker image)' + msg
assert check_online(), 'skipping check (offline)' + msg
cmd = 'git fetch && git config --get remote.origin.url'
url = check_output(cmd, shell=True,
timeout=5).decode().strip().rstrip('.git') # git fetch
branch = check_output('git rev-parse --abbrev-ref HEAD',
shell=True).decode().strip() # checked out
n = int(
check_output(f'git rev-list {branch}..origin/master --count',
shell=True)) # commits behind
if n > 0:
s = f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update."
else:
s = f'up to date with {url} ✅'
print(emojis(s)) # emoji-safe
def check_python(minimum='3.6.2'):
# Check current python version vs. required python version
check_version(platform.python_version(),
minimum,
name='Python ',
hard=True)
def check_version(current='0.0.0',
minimum='0.0.0',
name='version ',
pinned=False,
hard=False):
# Check version vs. required version
current, minimum = (pkg.parse_version(x) for x in (current, minimum))
result = (current == minimum) if pinned else (current >= minimum) # bool
if hard: # assert min requirements met
assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed'
else:
return result
@try_except
def check_requirements(requirements=ROOT / 'requirements.txt',
exclude=(),
install=True):
# Check installed dependencies meet requirements (pass *.txt file or list of packages)
prefix = colorstr('red', 'bold', 'requirements:')
check_python() # check python version
if isinstance(requirements, (str, Path)): # requirements.txt file
file = Path(requirements)
assert file.exists(
), f"{prefix} {file.resolve()} not found, check failed."
with file.open() as f:
requirements = [
f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f)
if x.name not in exclude
]
else: # list or tuple of packages
requirements = [x for x in requirements if x not in exclude]
n = 0 # number of packages updates
for r in requirements:
try:
pkg.require(r)
except Exception as e: # DistributionNotFound or VersionConflict if requirements not met
s = f"{prefix} {r} not found and is required by YOLOv5"
if install:
print(f"{s}, attempting auto-update...")
try:
assert check_online(
), f"'pip install {r}' skipped (offline)"
print(
check_output(f"pip install '{r}'",
shell=True).decode())
n += 1
except Exception as e:
print(f'{prefix} {e}')
else:
print(f'{s}. Please install and rerun your command.')
if n: # if packages updated
source = file.resolve() if 'file' in locals() else requirements
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
print(emojis(s))
def check_img_size(imgsz, s=32, floor=0):
# Verify image size is a multiple of stride s in each dimension
if isinstance(imgsz, int): # integer i.e. img_size=640
new_size = max(make_divisible(imgsz, int(s)), floor)
else: # list i.e. img_size=[640, 480]
new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]
if new_size != imgsz:
print(
f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}'
)
return new_size
def check_imshow():
# Check if environment supports image displays
try:
assert not is_docker(
), 'cv2.imshow() is disabled in Docker environments'
assert not is_colab(
), 'cv2.imshow() is disabled in Google Colab environments'
cv2.imshow('test', np.zeros((1, 1, 3)))
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return True
except Exception as e:
print(
f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}'
)
return False
def check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''):
# Check file(s) for acceptable suffix
if file and suffix:
if isinstance(suffix, str):
suffix = [suffix]
for f in file if isinstance(file, (list, tuple)) else [file]:
s = Path(f).suffix.lower() # file suffix
if len(s):
assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
def check_yaml(file, suffix=('.yaml', '.yml')):
# Search/download YAML file (if necessary) and return path, checking suffix
return check_file(file, suffix)
def check_file(file, suffix=''):
# Search/download file (if necessary) and return path
check_suffix(file, suffix) # optional
file = str(file) # convert to str()
if Path(file).is_file() or file == '': # exists
return file
elif file.startswith(('http:/', 'https:/')): # download
url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/
file = Path(urllib.parse.unquote(file).split(
'?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth
if Path(file).is_file():
print(f'Found {url} locally at {file}') # file already exists
else:
print(f'Downloading {url} to {file}...')
torch.hub.download_url_to_file(url, file)
assert Path(file).exists() and Path(file).stat(
).st_size > 0, f'File download failed: {url}' # check
return file
else: # search
files = []
for d in 'data', 'models', 'utils': # search directories
files.extend(glob.glob(str(ROOT / d / '**' / file),
recursive=True)) # find file
assert len(files), f'File not found: {file}' # assert file was found
assert len(
files
) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
return files[0] # return file
def check_dataset(data, autodownload=True):
# Download and/or unzip dataset if not found locally
# Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip
# Download (optional)
extract_dir = ''
if isinstance(data, (str, Path)) and str(data).endswith(
'.zip'): # i.e. gs://bucket/dir/coco128.zip
download(data,
dir='../datasets',
unzip=True,
delete=False,
curl=False,
threads=1)
data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml'))
extract_dir, autodownload = data.parent, False
# Read yaml (optional)
if isinstance(data, (str, Path)):
with open(data, errors='ignore') as f:
data = yaml.safe_load(f) # dictionary
# Parse yaml
path = extract_dir or Path(data.get('path')
or '') # optional 'path' default to '.'
for k in 'train', 'val', 'test':
if data.get(k): # prepend path
data[k] = str(path / data[k]) if isinstance(
data[k], str) else [str(path / x) for x in data[k]]
assert 'nc' in data, "Dataset 'nc' key missing."
if 'names' not in data:
data['names'] = [f'class{i}' for i in range(data['nc'])
] # assign class names if missing
train, val, test, s = (data.get(x)
for x in ('train', 'val', 'test', 'download'))
if val:
val = [
Path(x).resolve()
for x in (val if isinstance(val, list) else [val])
] # val path
if not all(x.exists() for x in val):
print('\nWARNING: Dataset not found, nonexistent paths: %s' %
[str(x) for x in val if not x.exists()])
if s and autodownload: # download script
root = path.parent if 'path' in data else '..' # unzip directory i.e. '../'
if s.startswith('http') and s.endswith('.zip'): # URL
f = Path(s).name # filename
print(f'Downloading {s} to {f}...')
torch.hub.download_url_to_file(s, f)
Path(root).mkdir(parents=True,
exist_ok=True) # create root
ZipFile(f).extractall(path=root) # unzip
Path(f).unlink() # remove zip
r = None # success
elif s.startswith('bash '): # bash script
print(f'Running {s} ...')
r = os.system(s)
else: # python script
r = exec(s, {'yaml': data}) # return None
print(
f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n"
)
else:
raise Exception('Dataset not found.')
return data # dictionary
def url2file(url):
# Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/
file = Path(urllib.parse.unquote(url)).name.split('?')[
0] # '%2F' to '/', split https://url.com/file.txt?auth
return file
def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):
# Multi-threaded file download and unzip function, used in data.yaml for autodownload
def download_one(url, dir):
# Download 1 file
f = dir / Path(url).name # filename
if Path(url).is_file(): # exists in current path
Path(url).rename(f) # move to dir
elif not f.exists():
print(f'Downloading {url} to {f}...')
if curl:
os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -"
) # curl download, retry and resume on fail
else:
torch.hub.download_url_to_file(url, f,
progress=True) # torch download
if unzip and f.suffix in ('.zip', '.gz'):
print(f'Unzipping {f}...')
if f.suffix == '.zip':
ZipFile(f).extractall(path=dir) # unzip
elif f.suffix == '.gz':
os.system(f'tar xfz {f} --directory {f.parent}') # unzip
if delete:
f.unlink() # remove zip
dir = Path(dir)
dir.mkdir(parents=True, exist_ok=True) # make directory
if threads > 1:
pool = ThreadPool(threads)
pool.imap(lambda x: download_one(*x),
zip(url, repeat(dir))) # multi-threaded
pool.close()
pool.join()
else:
for u in [url] if isinstance(url, (str, Path)) else url:
download_one(u, dir)
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
def clean_str(s):
# Cleans a string by replacing special characters with underscore _
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
# lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
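# Hedged usage sketch (illustrative, not part of the original file; the helper
# name is hypothetical): the lambda returned by one_cycle() can drive a
# LambdaLR schedule, assuming an existing `optimizer`. lf(0) == y1 and
# lf(steps) == y2 along half a cosine.
def _example_one_cycle_schedule(optimizer, epochs=100, lrf=0.1):
    lf = one_cycle(1.0, lrf, epochs)  # e.g. lf(0) -> 1.0, lf(50) -> 0.55, lf(100) -> 0.1
    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)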
def colorstr(*input):
# Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
*args, string = input if len(input) > 1 else ('blue', 'bold', input[0]
) # color arguments, string
colors = {
'black': '\033[30m', # basic colors
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'white': '\033[37m',
'bright_black': '\033[90m', # bright colors
'bright_red': '\033[91m',
'bright_green': '\033[92m',
'bright_yellow': '\033[93m',
'bright_blue': '\033[94m',
'bright_magenta': '\033[95m',
'bright_cyan': '\033[96m',
'bright_white': '\033[97m',
'end': '\033[0m', # misc
'bold': '\033[1m',
'underline': '\033[4m'
}
return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
classes = labels[:, 0].astype(np.int) # labels = [class xywh]
weights = np.bincount(classes, minlength=nc) # occurrences per class
# Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class_weights and image contents
class_counts = np.array(
[np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels])
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
return image_weights
def coco80_to_coco91_class(
): # converts 80-index (val2014) to 91-index (paper)
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
x = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84,
85, 86, 87, 88, 89, 90
]
return x
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
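# Worked example (illustrative, not part of the original file; the helper name
# is hypothetical): a single box with top-left (0, 0) and bottom-right (10, 20)
# round-trips through both conversions.
def _example_box_conversion():
    box_xyxy = np.array([[0., 0., 10., 20.]])
    box_xywh = xyxy2xywh(box_xyxy)  # [[5., 10., 10., 20.]] -> center x, center y, w, h
    return xywh2xyxy(box_xywh)      # back to [[0., 0., 10., 20.]]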
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
# Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
return y
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
if clip:
clip_coords(x, (h - eps, w - eps)) # warning: inplace clip
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center
y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center
y[:, 2] = (x[:, 2] - x[:, 0]) / w # width
y[:, 3] = (x[:, 3] - x[:, 1]) / h # height
return y
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
# Convert normalized segments into pixel segments, shape (n,2)
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * x[:, 0] + padw # top left x
y[:, 1] = h * x[:, 1] + padh # top left y
return y
def segment2box(segment, width=640, height=640):
# Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
x, y, = x[inside], y[inside]
return np.array([x.min(), y.min(), x.max(),
y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
def segments2boxes(segments):
# Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
boxes = []
for s in segments:
x, y = s.T # segment xy
boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
return xyxy2xywh(np.array(boxes)) # cls, xywh
def resample_segments(segments, n=1000):
# Up-sample an (n,2) segment
for i, s in enumerate(segments):
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = np.concatenate([
np.interp(x, xp, s[:, i]) for i in range(2)
]).reshape(2, -1).T # segment xy
return segments
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0],
img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (
img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain
clip_coords(coords, img0_shape)
return coords
def clip_coords(boxes, shape):
# Clip bounding xyxy bounding boxes to image shape (height, width)
if isinstance(boxes, torch.Tensor): # faster individually
boxes[:, 0].clamp_(0, shape[1]) # x1
boxes[:, 1].clamp_(0, shape[0]) # y1
boxes[:, 2].clamp_(0, shape[1]) # x2
boxes[:, 3].clamp_(0, shape[0]) # y2
else: # np.array (faster grouped)
boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2
boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2
def non_max_suppression(prediction,
conf_thres=0.25,
iou_thres=0.45,
classes=None,
agnostic=False,
multi_label=False,
labels=(),
max_det=300):
"""Runs Non-Maximum Suppression (NMS) on inference results
Returns:
list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
nc = prediction.shape[2] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Checks
assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros(
(0, 6), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
l = labels[xi]
v = torch.zeros((len(l), nc + 5), device=x.device)
v[:, :4] = l[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()),
1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
elif n > max_nms: # excess boxes
x = x[x[:, 4].argsort(
descending=True)[:max_nms]] # sort by confidence
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n <
3E3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(
1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
print(f'WARNING: NMS time limit {time_limit}s exceeded')
break # time limit exceeded
return output
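# Hedged usage sketch (illustrative, not part of the original file; the helper
# name is hypothetical): given raw model output of shape
# (batch, num_boxes, 5 + num_classes), NMS returns one (n, 6) tensor per image
# with rows [x1, y1, x2, y2, conf, cls].
def _example_nms(prediction):
    detections = non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45)
    for det in detections:  # one tensor per image in the batch
        for *xyxy, conf, cls in det:
            print([float(v) for v in xyxy], float(conf), int(cls))
    return detections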
def strip_optimizer(f='best.pt',
s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
print(
f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB"
)
def print_mutation(results, hyp, save_dir, bucket):
evolve_csv, results_csv, evolve_yaml = save_dir / 'evolve.csv', save_dir / 'results.csv', save_dir / 'hyp_evolve.yaml'
keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5',
'metrics/mAP_0.5:0.95', 'val/box_loss', 'val/obj_loss',
'val/cls_loss') + tuple(hyp.keys()) # [results + hyps]
keys = tuple(x.strip() for x in keys)
vals = results + tuple(hyp.values())
n = len(keys)
# Download (optional)
if bucket:
url = f'gs://{bucket}/evolve.csv'
if gsutil_getsize(url) > (os.path.getsize(evolve_csv)
if os.path.exists(evolve_csv) else 0):
os.system(f'gsutil cp {url} {save_dir}'
) # download evolve.csv if larger than local
# Log to evolve.csv
s = '' if evolve_csv.exists() else (
('%20s,' * n % keys).rstrip(',') + '\n') # add header
with open(evolve_csv, 'a') as f:
f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n')
# Print to screen
print(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys))
print(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals),
end='\n\n\n')
# Save yaml
with open(evolve_yaml, 'w') as f:
data = pd.read_csv(evolve_csv)
data = data.rename(columns=lambda x: x.strip()) # strip keys
i = np.argmax(fitness(data.values[:, :7])) #
f.write('# YOLOv5 Hyperparameter Evolution Results\n' +
f'# Best generation: {i}\n' +
f'# Last generation: {len(data)}\n' + '# ' +
', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' +
'# ' + ', '.join(f'{x:>20.5g}'
for x in data.values[i, :7]) + '\n\n')
yaml.safe_dump(hyp, f, sort_keys=False)
if bucket:
os.system(
f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload
def apply_classifier(x, model, img, im0):
# Apply a second stage classifier to YOLO outputs
# Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for j, a in enumerate(d): # per item
cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
# cv2.imwrite('example%i.jpg' % j, cutout)
                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
im = np.ascontiguousarray(im,
dtype=np.float32) # uint8 to float32
im /= 255 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(
1) # classifier prediction
x[i] = x[i][pred_cls1 ==
pred_cls2] # retain matching class detections
return x
def increment_path(path, exist_ok=False, sep='', mkdir=False):
# Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
path = Path(path) # os-agnostic
if path.exists() and not exist_ok:
path, suffix = (path.with_suffix(''),
path.suffix) if path.is_file() else (path, '')
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
path = Path(f"{path}{sep}{n}{suffix}") # increment path
if mkdir:
path.mkdir(parents=True, exist_ok=True) # make directory
return path
# Variables
NCOLS = 0 if is_docker() else shutil.get_terminal_size(
).columns # terminal window size
|
py | b40668da7052604a0109fea3864fa65d7a1ec555 | from django.test import TestCase, Client
from django.test import LiveServerTestCase, TestCase, tag
from django.urls import reverse
from selenium import webdriver
from .models import Report
# Create your tests here.
@tag('functional')
class FunctionalTestCase(LiveServerTestCase):
"""Base class for functional test cases with selenium."""
@classmethod
def setUpClass(cls):
super().setUpClass()
# Change to another webdriver if desired (and update CI accordingly).
options = webdriver.chrome.options.Options()
# These options are needed for CI with Chromium.
options.headless = True # Disable GUI.
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
cls.selenium = webdriver.Chrome(options=options)
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super().tearDownClass()
class MainTestCase(TestCase):
def test_eksistensi_url(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
# You can also use path names instead of explicit paths.
response = self.client.get(reverse('report:report'))
self.assertEqual(response.status_code, 200)
def test_eksistensi_template(self):
response = Client().get('/reportIssue/')
html_response = response.content.decode('utf8')
self.assertIn("Report Problems", html_response)
self.assertIn("Submit", html_response)
def test_eksistensi_navbar(self):
response = Client().get('/reportIssue/')
html_response = response.content.decode('utf8')
self.assertIn("Home", html_response)
self.assertIn("Donate", html_response)
self.assertIn("Donations", html_response)
self.assertIn("Testimonies", html_response)
self.assertIn("Questions", html_response)
self.assertIn("Reports", html_response)
class MainFunctionalTestCase(FunctionalTestCase):
def test_root_url_exists(self):
self.selenium.get(f'{self.live_server_url}/')
html = self.selenium.find_element_by_tag_name('html')
self.assertNotIn('not found', html.text.lower())
self.assertNotIn('error', html.text.lower())
|
py | b406696da6895b81c0caba88b31d4c11f4abcd1d | """
Problem: https://www.hackerrank.com/challenges/jumping-on-the-clouds/problem
Author: Eda AYDIN
"""
# !/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'jumpingOnClouds' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY c as parameter.
#
def jumpingOnClouds(c):
# Write your code here
count = 0
i = 0
while i < len(c) - 1:
if i + 2 < len(c) and c[i + 2] == 0:
i += 2
else:
i += 1
count += 1
return count
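# Worked example (illustrative, not part of the original submission; the helper
# name is hypothetical): for c = [0, 0, 1, 0, 0, 1, 0] the loop jumps
# 0 -> 1 -> 3 -> 4 -> 6, preferring the two-cloud jump whenever the landing
# cloud is safe, so the answer is 4.
def _example_jumping_on_clouds():
    assert jumpingOnClouds([0, 0, 1, 0, 0, 1, 0]) == 4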
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
c = list(map(int, input().rstrip().split()))
result = jumpingOnClouds(c)
fptr.write(str(result) + '\n')
fptr.close()
|
py | b4066a924ad32835d4a1477e0b5368e78f8245bb | import random as rnd
class ListGraph:
def __init__(self, n):
self.n = n
self.graph = {}
for i in range(n):
self.graph[i] = []
def fill(self,saturation):
p = self.n * (self.n - 1) * saturation
while p > 0:
x = rnd.randint(0, self.n - 1)
y = rnd.randint(0, self.n - 1)
if y in self.graph[x] or x == y:
continue
self.graph[x].append(y)
#matrix[x][y] = 1
p -= 1
    def dfs(self, start, path=None):
        if path is None:  # avoid a shared mutable default argument
            path = []
        stack = [start]
while stack:
current = stack.pop(0)
if current not in path:
path = path + [current]
stack = self.graph[current] + stack
return path
    def bfs(self, start, path=None):
        if path is None:  # avoid a shared mutable default argument
            path = []
        queue = [start]
while queue:
vertex = queue.pop(0)
if vertex not in path:
path.append(vertex)
queue.extend(list(set( self.graph[vertex]) - set(path)))
return path
if __name__ == "__main__":
    g = ListGraph(10)
    g.fill(0.5)  # fill() requires a saturation value; 0.5 here is an arbitrary example
    print(g.dfs(0))
    print(g.bfs(0))
|
py | b4066b31052c2437476f75a47435caf869360c9f | # !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, with_statement, division
__author__ = "Jérôme Kieffer"
__contact__ = "[email protected]"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "23/06/2016"
__status__ = "development"
import unittest
from pyFAI.test.utilstest import UtilsTest
import logging, threading
import types, os, sys
import numpy
logger = logging.getLogger(__file__)
logging.basicConfig(level=logging.INFO)
from math import ceil, floor
pyFAI = sys.modules["pyFAI"]
from pyFAI import detectors, ocl_azim_lut, _distortion, _distortionCSR, distortion
from pyFAI.utils import timeit
import fabio
# import pyFAI._distortion
# import pyFAI._distortionCSR
def test():
    # working on 256x256
# x, y = numpy.ogrid[:256, :256]
# grid = numpy.logical_or(x % 10 == 0, y % 10 == 0) + numpy.ones((256, 256), numpy.float32)
# det = detectors.FReLoN("frelon_8_8.spline")
# # working with halfccd spline
x, y = numpy.ogrid[:1024, :2048]
grid = numpy.logical_or(x % 100 == 0, y % 100 == 0) + numpy.ones((1024, 2048), numpy.float32)
splineFilePath = "halfccd.spline"
splineFile = UtilsTest.getimage(splineFilePath)
det = detectors.FReLoN(splineFile)
# working with halfccd spline
# x, y = numpy.ogrid[:2048, :2048]
# grid = numpy.logical_or(x % 100 == 0, y % 100 == 0).astype(numpy.float32) + numpy.ones((2048, 2048), numpy.float32)
# det = detectors.FReLoN("frelon.spline")
print(det, det.max_shape)
disLUT = _distortion.Distortion(det)
print(disLUT)
lut = disLUT.calc_LUT_size()
print(disLUT.lut_size)
print(lut.mean())
disLUT.calc_LUT()
outLUT = disLUT.correct(grid)
fabio.edfimage.edfimage(data=outLUT.astype("float32")).write("test_correct_LUT.edf")
print("*"*50)
print(det, det.max_shape)
disCSR = _distortionCSR.Distortion(det, foo=64)
print(disCSR)
lut = disCSR.calc_LUT_size()
print(disCSR.lut_size)
print(lut.mean())
disCSR.calc_LUT()
outCSR = disCSR.correct(grid)
fabio.edfimage.edfimage(data=outCSR.astype("float32")).write("test_correct_CSR.edf")
print("*"*50)
disCSR.setDevice()
outCSRocl = disCSR.correct(grid)
fabio.edfimage.edfimage(data=outCSRocl.astype("float32")).write("test_correct_CSR.edf")
print("*"*50)
print(det, det.max_shape)
disLUTpy = distortion.Distortion(det)
print(disLUTpy)
lut = disLUTpy.calc_LUT_size()
print(disLUTpy.lut_size)
print(lut.mean())
disLUTpy.calc_LUT()
outLUTpy = disLUTpy.correct(grid)
fabio.edfimage.edfimage(data=outLUTpy.astype("float32")).write("test_correct_LUT.edf")
print("*"*50)
# x, y = numpy.ogrid[:2048, :2048]
# grid = numpy.logical_or(x % 100 == 0, y % 100 == 0)
# det = detectors.FReLoN("frelon.spline")
# print( det, det.max_shape)
# dis = Distortion(det)
# print(dis
# lut = dis.calc_LUT_size()
# print(dis.lut_size
# print("LUT mean & max", lut.mean(), lut.max()
# dis.calc_LUT()
# out = dis.correct(grid)
# fabio.edfimage.edfimage(data=out.astype("float32")).write("test2048.edf")
import pylab
# pylab.imshow(outLUT)
# pylab.show()
# pylab.imshow(outCSR) # , interpolation="nearest")
# , interpolation="nearest")
# pylab.show()
pylab.imshow(outCSRocl)
pylab.show()
# pylab.imshow(outLUTpy)
# pylab.show()
assert numpy.allclose(outLUT, outCSRocl)
if __name__ == "__main__":
det = dis = lut = None
test()
|
py | b4066b65ab21339ef34e363261fe22a26ff5dde7 | __version__ = "2.8.1"
|
py | b4066d569737e671566b7f4e7841c51a96260fd6 | from django.db import models
from django.db.models.fields import BigIntegerField, CharField
from django.utils import timezone
# Create your models here.
class APICounter(models.Model):
endpoint = CharField(max_length=50)
class APIHits(models.Model):
count = BigIntegerField()
|
py | b4066dd972380979d0d6910b2088bb2a81107e2c |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import calfews_src
from calfews_src import *
from datetime import datetime
import os
import shutil
from configobj import ConfigObj
import json
import h5py
# get example data to help get output classes. Note that if new infrastructure, banking arrangements, etc, may need to update output classes.
modelno = pd.read_pickle('calfews_src/data/results/baseline_wy2017/p0/modelno0.pkl')
modelso = pd.read_pickle('calfews_src/data/results/baseline_wy2017/p0/modelso0.pkl')
# create nested dict to hold all possible output types
d = {'north':{'reservoirs': {}, 'delta':{}}, 'south':{'reservoirs':{}, 'contracts':{}, 'districts':{}, 'private':{}, 'waterbanks':{}}}
# northern reservoirs
for name in [x.name for x in modelno.reservoir_list]:
d['north']['reservoirs'][name] = {}
for output in ['S', 'R', 'R_to_delta', 'available_storage', 'outflow_release', 'days_til_full', 'contract_flooded', 'reclaimed_carryover']: # list reservoir outputs of interest here
d['north']['reservoirs'][name][output] = True
    for input in ['Q', 'SNPK', 'downstream', 'fnf']: # list reservoir inputs of interest here
d['north']['reservoirs'][name][input] = True
# southern reservoirs
for name in [x.name for x in modelso.reservoir_list]:
d['south']['reservoirs'][name] = {}
for output in ['S', 'available_storage', 'outflow_release', 'days_til_full', 'reclaimed_carryover', 'contract_flooded', 'reclaimed_carryover', 'flood_spill', 'flood_deliveries']: # list reservoir outputs of interest here
d['south']['reservoirs'][name][output] = True
if name != 'sanluisstate' and name != 'sanluisfederal':
        for input in ['Q', 'SNPK', 'downstream', 'fnf']: # list reservoir inputs of interest here
d['south']['reservoirs'][name][input] = True
# delta
for output in ['HRO_pump', 'TRP_pump', 'x2', 'outflow', 'inflow', 'uncontrolled_swp', 'uncontrolled_cvp', 'remaining_outflow', 'swp_allocation', 'cvp_allocation']: # list delta outputs here
d['north']['delta'][output] = True
for input in ['gains', 'gains_sac', 'gains_sj', 'depletions', 'vernalis_flow', 'eastside_streams', 'OMR', 'forecastSRI', 'forecastSJI']: # list delta inputs here
d['north']['delta'][input] = True
# contracts, need to account for discrepancy in object names between contract classes vs names in district class deliveries
contract_dict = {'friant1': 'friant1', 'friant2': 'friant2', 'tableA': 'swpdelta', 'cvpdelta': 'cvpdelta',
'exchange': 'cvpexchange', 'cvc': 'crossvalley', 'kern': 'kernriver', 'tule': 'tuleriver',
'kaweah': 'kaweahriver', 'kings': 'kingsriver'}
for name in [contract_dict[x.name] for x in modelso.contract_list]:
d['south']['contracts'][name] = {}
for output in ['allocation', 'available_water']: # list contract outputs here
d['south']['contracts'][name][output] = True
d['south']['contracts'][name]['daily_supplies'] = {}
for output in ['contract', 'carryover', 'turnback', 'flood', 'total_carryover']:
d['south']['contracts'][name]['daily_supplies'][output] = True
# districts
for name, district_key in zip([x.name for x in modelso.district_list], [x.key for x in modelso.district_list]):
d['south']['districts'][name] = {}
for contract in modelso.__getattribute__(name).contract_list:
for output in ['projected', 'delivery', 'carryover', 'recharged', 'dynamic_recharge_cap']: # list district outputs, for contract/right allocations
d['south']['districts'][name][contract + '_' + output] = True
for contract in modelso.__getattribute__(name).contract_list_all:
for output in ['flood', 'flood_irrigation']: # list district outputs, for contracts/rights with no allocation
# if (np.max(modelso.__getattribute__(name).daily_supplies_full[contract + '_' + output]) > 0):
d['south']['districts'][name][contract + '_' + output] = True
#for bank in modelso.__getattribute__(name).delivery_location_list:
for output in ['recharged',]:
d['south']['districts'][name][district_key + '_' + output] = True
for bank in [xx.key for xx in modelso.waterbank_list]:
for output in ['recharged',]:
d['south']['districts'][name][bank + '_' + output] = True
for bank in [xx.key for xx in modelso.leiu_list]:
for output in ['recharged',]:
d['south']['districts'][name][bank + '_' + output] = True
for output in ['recover_banked', 'inleiu_irrigation', 'inleiu_recharge', 'leiupumping', 'exchanged_GW', 'exchanged_SW', 'pumping', 'irr_demand', 'tot_demand', 'dynamic_recovery_cap']:
d['south']['districts'][name][output] = True
# private
for name in [x.name for x in modelso.private_list]:
d['south']['private'][name] = {}
for district in modelso.__getattribute__(name).district_list:
for contract in modelso.__getattribute__(name).contract_list:
for output in ['projected', 'delivery', 'carryover', 'recharged', 'dynamic_recharge_cap']: # list district outputs, for contract/right allocations
d['south']['private'][name][district + '_' + contract + '_' + output] = True
for contract in modelso.__getattribute__(name).contract_list_all:
for output in ['flood']: # list district outputs, for contracts/rights with no allocation
# if (np.max(modelso.__getattribute__(name).daily_supplies_full[contract + '_' + output]) > 0):
d['south']['private'][name][district + '_' + contract + '_' + output] = True
#for bank in modelso.__getattribute__(name).delivery_location_list:
for output in ['recharged',]:
d['south']['private'][name][district + '_' + district + '_' + output] = True
for bank in [xx.key for xx in modelso.waterbank_list]:
for output in ['recharged',]:
d['south']['private'][name][district + '_' + bank + '_' + output] = True
for bank in [xx.key for xx in modelso.leiu_list]:
for output in ['recharged',]:
d['south']['private'][name][district + '_' + bank + '_' + output] = True
for output in ['recover_banked', 'inleiu', 'leiupumping', 'exchanged_GW', 'exchanged_SW', 'pumping', 'irr_demand', 'tot_demand', 'dynamic_recovery_cap']:
d['south']['private'][name][district + '_' + output] = True
# private
for name in [x.name for x in modelso.city_list]:
d['south']['private'][name] = {}
for district in modelso.__getattribute__(name).district_list:
for contract in modelso.__getattribute__(name).contract_list:
for output in ['projected', 'delivery', 'carryover', 'recharged', 'dynamic_recharge_cap']: # list district outputs, for contract/right allocations
d['south']['private'][name][district + '_' + contract + '_' + output] = True
for contract in modelso.__getattribute__(name).contract_list_all:
for output in ['flood']: # list district outputs, for contracts/rights with no allocation
# if (np.max(modelso.__getattribute__(name).daily_supplies_full[contract + '_' + output]) > 0):
d['south']['private'][name][district + '_' + contract + '_' + output] = True
for output in ['recharged',]:
d['south']['private'][name][district + '_' + district + '_' + output] = True
for bank in [xx.key for xx in modelso.waterbank_list]:
for output in ['recharged',]:
d['south']['private'][name][district + '_' + bank + '_' + output] = True
for bank in [xx.key for xx in modelso.leiu_list]:
for output in ['recharged',]:
d['south']['private'][name][district + '_' + bank + '_' + output] = True
for output in ['recover_banked', 'inleiu', 'leiupumping', 'exchanged_GW', 'exchanged_SW', 'pumping', 'irr_demand', 'tot_demand', 'dynamic_recovery_cap']:
d['south']['private'][name][district + '_' + output] = True
for name in [x.name for x in modelso.waterbank_list]:
d['south']['waterbanks'][name] = {}
for partner in modelso.__getattribute__(name).bank_timeseries.keys():
d['south']['waterbanks'][name][partner] = True
for name in [x.name for x in modelso.leiu_list]:
d['south']['waterbanks'][name] = {}
for partner in modelso.__getattribute__(name).bank_timeseries.keys():
d['south']['waterbanks'][name][partner] = True
with open('calfews_src/data/input/output_list.json', 'w') as f:
json.dump(d, f, indent=2)
# with open('calfews_src/data/input/output_list.json', 'r') as f:
# dat=json.load(f)
|
py | b4066df95e0cc51b6bf6b1b6af2d3d924e80b299 | import collections
import logging
import hashlib
import json
import jsonschema
import os
import threading
from typing import Any, Dict
import ray
import ray._private.services as services
from ray.autoscaler._private.providers import _get_default_config
from ray.autoscaler._private.docker import validate_docker_config
from ray.autoscaler.tags import NODE_TYPE_LEGACY_WORKER, NODE_TYPE_LEGACY_HEAD
REQUIRED, OPTIONAL = True, False
RAY_SCHEMA_PATH = os.path.join(
os.path.dirname(ray.autoscaler.__file__), "ray-schema.json")
# Internal kv keys for storing debug status.
DEBUG_AUTOSCALING_ERROR = "__autoscaling_error"
DEBUG_AUTOSCALING_STATUS = "__autoscaling_status"
logger = logging.getLogger(__name__)
class ConcurrentCounter:
def __init__(self):
self._lock = threading.RLock()
self._counter = collections.defaultdict(int)
def inc(self, key, count):
with self._lock:
self._counter[key] += count
return self.value
def dec(self, key, count):
with self._lock:
self._counter[key] -= count
assert self._counter[key] >= 0, "counter cannot go negative"
return self.value
def breakdown(self):
with self._lock:
return dict(self._counter)
@property
def value(self):
with self._lock:
return sum(self._counter.values())
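# Hedged usage sketch (illustrative, not part of the original module; the
# helper name is hypothetical): the counter tracks per-key counts under a
# re-entrant lock, so it can be shared across threads.
def _example_concurrent_counter():
    counter = ConcurrentCounter()
    counter.inc("worker", 3)
    counter.inc("head", 1)
    counter.dec("worker", 1)
    assert counter.value == 3
    assert counter.breakdown() == {"worker": 2, "head": 1}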
def validate_config(config: Dict[str, Any]) -> None:
"""Required Dicts indicate that no extra fields can be introduced."""
if not isinstance(config, dict):
raise ValueError("Config {} is not a dictionary".format(config))
with open(RAY_SCHEMA_PATH) as f:
schema = json.load(f)
try:
jsonschema.validate(config, schema)
except jsonschema.ValidationError as e:
raise e from None
# Detect out of date defaults. This happens when the autoscaler that filled
# out the default values is older than the version of the autoscaler that
# is running on the cluster.
if "cluster_synced_files" not in config:
raise RuntimeError(
"Missing 'cluster_synced_files' field in the cluster "
"configuration. This is likely due to the Ray version running "
"in the cluster {ray_version} is greater than the Ray version "
"running on your laptop. Please try updating Ray on your local "
"machine and make sure the versions match.".format(
ray_version=ray.__version__))
if "available_node_types" in config:
if "head_node_type" not in config:
raise ValueError(
"You must specify `head_node_type` if `available_node_types "
"is set.")
if config["head_node_type"] not in config["available_node_types"]:
raise ValueError(
"`head_node_type` must be one of `available_node_types`.")
if "worker_default_node_type" not in config:
raise ValueError("You must specify `worker_default_node_type` if "
"`available_node_types is set.")
if (config["worker_default_node_type"] not in config[
"available_node_types"]):
raise ValueError("`worker_default_node_type` must be one of "
"`available_node_types`.")
def prepare_config(config):
with_defaults = fillout_defaults(config)
merge_setup_commands(with_defaults)
validate_docker_config(with_defaults)
return with_defaults
def rewrite_legacy_yaml_to_available_node_types(
config: Dict[str, Any]) -> Dict[str, Any]:
if "available_node_types" not in config:
# TODO(ameer/ekl/alex): we can also rewrite here many other fields
# that include initialization/setup/start commands and ImageId.
logger.debug("Converting legacy cluster config to multi node types.")
config["available_node_types"] = {
NODE_TYPE_LEGACY_HEAD: {
"node_config": config["head_node"],
"resources": config["head_node"].get("resources") or {},
"min_workers": 0,
"max_workers": 0,
},
NODE_TYPE_LEGACY_WORKER: {
"node_config": config["worker_nodes"],
"resources": config["worker_nodes"].get("resources") or {},
"min_workers": config.get("min_workers", 0),
"max_workers": config.get("max_workers", 0),
},
}
config["head_node_type"] = NODE_TYPE_LEGACY_HEAD
config["worker_default_node_type"] = NODE_TYPE_LEGACY_WORKER
return config
def fillout_defaults(config: Dict[str, Any]) -> Dict[str, Any]:
defaults = _get_default_config(config["provider"])
defaults.update(config)
defaults["auth"] = defaults.get("auth", {})
defaults = rewrite_legacy_yaml_to_available_node_types(defaults)
return defaults
def merge_setup_commands(config):
config["head_setup_commands"] = (
config["setup_commands"] + config["head_setup_commands"])
config["worker_setup_commands"] = (
config["setup_commands"] + config["worker_setup_commands"])
return config
def with_head_node_ip(cmds, head_ip=None):
if head_ip is None:
head_ip = services.get_node_ip_address()
out = []
for cmd in cmds:
out.append("export RAY_HEAD_IP={}; {}".format(head_ip, cmd))
return out
def hash_launch_conf(node_conf, auth):
hasher = hashlib.sha1()
# For hashing, we replace the path to the key with the
# key itself. This is to make sure the hashes are the
# same even if keys live at different locations on different
# machines.
full_auth = auth.copy()
for key_type in ["ssh_private_key", "ssh_public_key"]:
if key_type in auth:
with open(os.path.expanduser(auth[key_type])) as key:
full_auth[key_type] = key.read()
hasher.update(
json.dumps([node_conf, full_auth], sort_keys=True).encode("utf-8"))
return hasher.hexdigest()
# Cache the file hashes to avoid rescanning it each time. Also, this avoids
# inadvertently restarting workers if the file mount content is mutated on the
# head node.
_hash_cache = {}
def hash_runtime_conf(file_mounts,
cluster_synced_files,
extra_objs,
generate_file_mounts_contents_hash=False):
"""Returns two hashes, a runtime hash and file_mounts_content hash.
The runtime hash is used to determine if the configuration or file_mounts
contents have changed. It is used at launch time (ray up) to determine if
a restart is needed.
The file_mounts_content hash is used to determine if the file_mounts or
cluster_synced_files contents have changed. It is used at monitor time to
determine if additional file syncing is needed.
"""
runtime_hasher = hashlib.sha1()
contents_hasher = hashlib.sha1()
def add_content_hashes(path, allow_non_existing_paths: bool = False):
def add_hash_of_file(fpath):
with open(fpath, "rb") as f:
for chunk in iter(lambda: f.read(2**20), b""):
contents_hasher.update(chunk)
path = os.path.expanduser(path)
if allow_non_existing_paths and not os.path.exists(path):
return
if os.path.isdir(path):
dirs = []
for dirpath, _, filenames in os.walk(path):
dirs.append((dirpath, sorted(filenames)))
for dirpath, filenames in sorted(dirs):
contents_hasher.update(dirpath.encode("utf-8"))
for name in filenames:
contents_hasher.update(name.encode("utf-8"))
fpath = os.path.join(dirpath, name)
add_hash_of_file(fpath)
else:
add_hash_of_file(path)
conf_str = (json.dumps(file_mounts, sort_keys=True).encode("utf-8") +
json.dumps(extra_objs, sort_keys=True).encode("utf-8"))
# Only generate a contents hash if generate_contents_hash is true or
# if we need to generate the runtime_hash
if conf_str not in _hash_cache or generate_file_mounts_contents_hash:
for local_path in sorted(file_mounts.values()):
add_content_hashes(local_path)
head_node_contents_hash = contents_hasher.hexdigest()
# Generate a new runtime_hash if its not cached
# The runtime hash does not depend on the cluster_synced_files hash
# because we do not want to restart nodes only if cluster_synced_files
# contents have changed.
if conf_str not in _hash_cache:
runtime_hasher.update(conf_str)
runtime_hasher.update(head_node_contents_hash.encode("utf-8"))
_hash_cache[conf_str] = runtime_hasher.hexdigest()
# Add cluster_synced_files to the file_mounts_content hash
if cluster_synced_files is not None:
for local_path in sorted(cluster_synced_files):
# For cluster_synced_files, we let the path be non-existant
# because its possible that the source directory gets set up
# anytime over the life of the head node.
add_content_hashes(local_path, allow_non_existing_paths=True)
file_mounts_contents_hash = contents_hasher.hexdigest()
else:
file_mounts_contents_hash = None
return (_hash_cache[conf_str], file_mounts_contents_hash)
def add_prefix(info_string, prefix):
"""Prefixes each line of info_string, except the first, by prefix."""
lines = info_string.split("\n")
prefixed_lines = [lines[0]]
for line in lines[1:]:
prefixed_line = ":".join([prefix, line])
prefixed_lines.append(prefixed_line)
prefixed_info_string = "\n".join(prefixed_lines)
return prefixed_info_string
|
py | b4066e73c04d6241eaafe119bb0acac90bc4e098 | # -*- coding: utf-8 -*-
"""
Project name: Open Methodology for Security Tool Developers
Project URL: https://github.com/cr0hn/OMSTD
Copyright (c) 2014, cr0hn<-AT->cr0hn.com
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = 'cr0hn - cr0hn<-at->cr0hn.com (@ggdaniel)'
from framework.celery.celery import celery
from framework.tasks.main_task import main_tasks_function
if __name__ == "__main__":
main_tasks_function.delay() |
py | b4066e9f042fb732ee4118255071aa3615ea2f61 | import numpy
from chainer import backend
from chainer import function_node
import chainer.functions
import chainer.utils
from chainer.utils import type_check
class SelectorBase(function_node.FunctionNode):
"""Select an array element from a given axis or set of axes."""
def __init__(self, axis=None, keepdims=False):
self.keepdims = keepdims
if axis is None:
self.axis = None
elif isinstance(axis, int):
self.axis = (axis,)
elif isinstance(axis, tuple) and all(isinstance(a, int) for a in axis):
if len(set(axis)) != len(axis):
raise ValueError('duplicate value in axis: ({})'.format(
', '.join(map(str, axis))))
self.axis = axis
else:
raise TypeError('None, int or tuple of int are required')
def _fwd(self, x, xp):
raise NotImplementedError('_fwd should be implemented in sub-class.')
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
if self.axis is not None:
for axis in self.axis:
if axis >= 0:
type_check.expect(
axis < in_types[0].ndim,
)
else:
type_check.expect(
-axis - 1 < in_types[0].ndim,
)
def forward(self, x):
self.retain_inputs((0,))
self.retain_outputs((0,))
xp = backend.get_array_module(*x)
return xp.asarray(self._fwd(x[0], xp)),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
y = self.get_retained_outputs()[0]
if self.axis is None:
axis = range(x.ndim)
else:
axis = [ax % x.ndim for ax in self.axis]
# Add broadcastable dimensions to y and gy
# for each one that was reduced in the forward operation
shape = [s if ax not in axis else 1 for ax, s in enumerate(x.shape)]
gy = gy[0].reshape(shape)
y = y.reshape(shape)
# Compute the gradient
cond = (x.data == y.data)
gy = chainer.functions.broadcast_to(gy, cond.shape)
return gy * cond,
class Max(SelectorBase):
def _fwd(self, x, xp):
return xp.amax(x, axis=self.axis, keepdims=self.keepdims)
class Min(SelectorBase):
def _fwd(self, x, xp):
return xp.amin(x, axis=self.axis, keepdims=self.keepdims)
class IndexSelectorBase(function_node.FunctionNode):
"""Select index of an array element from a given axis."""
def __init__(self, axis=None):
if axis is None:
self.axis = None
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError('None or int are required')
def _fwd(self, x, xp):
raise NotImplementedError('_fwd should be implemented in sub-class.')
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 1,
in_types[0].dtype.kind == 'f'
)
if self.axis is not None:
if self.axis >= 0:
type_check.expect(
self.axis < in_types[0].ndim,
)
else:
type_check.expect(
-self.axis - 1 < in_types[0].ndim,
)
def forward(self, x):
xp = backend.get_array_module(*x)
return xp.asarray(self._fwd(x[0], xp)),
def backward(self, indexes, grad_outputs):
return None,
class ArgMin(IndexSelectorBase):
def _fwd(self, x, xp):
return xp.argmin(x, axis=self.axis).astype(numpy.int32)
class ArgMax(IndexSelectorBase):
def _fwd(self, x, xp):
return xp.argmax(x, axis=self.axis).astype(numpy.int32)
def max(x, axis=None, keepdims=False):
"""Maximum of array elements over a given axis.
Args:
x (~chainer.Variable): Array to be maximized.
axis (None, int, or tuple of int): Axis over which a max is performed.
The default (axis = None) is perform a max over all the dimensions
of the input array.
Returns:
~chainer.Variable: Output variable.
"""
return Max(axis, keepdims).apply((x,))[0]
def min(x, axis=None, keepdims=False):
"""Minimum of array elements over a given axis.
Args:
x (~chainer.Variable): Array to be minimized.
axis (None, int, or tuple of int): Axis over which a min is performed.
            The default (axis = None) is to perform a min over all the
            dimensions of the input array.
        keepdims (bool): If ``True``, the reduced axes are retained in the
            output as dimensions with size one.
Returns:
~chainer.Variable: Output variable.
"""
return Min(axis, keepdims).apply((x,))[0]
def argmax(x, axis=None):
"""Returns index which holds maximum of array elements over a given axis.
Args:
x (~chainer.Variable): Array to find maximum elements.
axis (None or int): Axis over which a max is performed.
            The default (axis = None) is to perform a max over all the
            dimensions of the input array.
Returns:
~chainer.Variable: Output variable.
"""
return ArgMax(axis).apply((x,))[0]
def argmin(x, axis=None):
"""Returns index which holds minimum of array elements over a given axis.
Args:
x (~chainer.Variable): Array to find minimum elements.
axis (None or int): Axis over which a min is performed.
            The default (axis = None) is to perform a min over all the
            dimensions of the input array.
Returns:
~chainer.Variable: Output variable.
"""
return ArgMin(axis).apply((x,))[0]
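# A minimal usage sketch (hypothetical data; chainer and numpy assumed importable):
#     import numpy as np
#     x = chainer.Variable(np.arange(12, dtype=np.float32).reshape(3, 4))
#     y = max(x, axis=1, keepdims=True)   # shape (3, 1), differentiable
#     i = argmax(x, axis=1)               # int32 indices, gradient is None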
|
py | b4066fa12877e25588b0f49ac6771d0b81ae4a9b | #!/usr/bin/env python
u"""
podaac_grace_sync.py
Written by Tyler Sutterley (10/2021)
Syncs GRACE/GRACE-FO and auxiliary data from the NASA JPL PO.DAAC Drive Server
Syncs CSR/GFZ/JPL GSM files for Release-06
Gets the latest technical note (TN) files
https://wiki.earthdata.nasa.gov/display/EL/How+To+Access+Data+With+Python
https://nsidc.org/support/faq/what-options-are-available-bulk-downloading-data-
https-earthdata-login-enabled
http://www.voidspace.org.uk/python/articles/authentication.shtml#base64
Register with NASA Earthdata Login system:
https://urs.earthdata.nasa.gov
Add PO.DAAC Drive OPS to NASA Earthdata Applications and get WebDAV Password
https://podaac-tools.jpl.nasa.gov/drive
OUTPUTS:
CSR/GFZ/JPL RL06 GSM
Tellus degree one coefficients (TN-13)
Technical notes for satellite laser ranging coefficients
COMMAND LINE OPTIONS:
--help: list the command line options
-N X, --netrc X: path to .netrc file for authentication
-D X, --directory X: working data directory
    -c X, --center X: GRACE Processing Center
    -r X, --release X: GRACE data releases to sync (RL06)
    -t X, --timeout X: Timeout in seconds for blocking operations
    -P X, --np X: Number of processes to run in parallel
-l, --log: output log of files downloaded
-M X, --mode X: Local permissions mode of the directories and files synced
PYTHON DEPENDENCIES:
lxml: Pythonic XML and HTML processing library using libxml2/libxslt
https://lxml.de/
https://github.com/lxml/lxml
future: Compatibility layer between Python 2 and Python 3
https://python-future.org/
PROGRAM DEPENDENCIES:
utilities.py: download and management utilities for syncing files
"""
from __future__ import print_function
import sys
import os
import re
import netrc
import shutil
import logging
import argparse
import traceback
import posixpath
import lxml.etree
import multiprocessing as mp
import gravity_toolkit.utilities
#-- PURPOSE: create and compile regular expression operator to find GRACE files
def compile_regex_pattern(PROC, DREL, DSET):
if ((DSET == 'GSM') and (PROC == 'CSR') and (DREL in ('RL04','RL05'))):
#-- CSR GSM: only monthly degree 60 products
#-- not the longterm degree 180, degree 96 dataset or the
#-- special order 30 datasets for the high-resonance months
release, = re.findall(r'\d+', DREL)
args = (DSET, int(release))
        regex_pattern=r'{0}-2_\d+-\d+_\d+_UTCSR_0060_000{1:d}.gz$'.format(*args)
elif ((DSET == 'GSM') and (PROC == 'CSR') and (DREL == 'RL06')):
#-- CSR GSM RL06: only monthly degree 60 products
release, = re.findall(r'\d+', DREL)
args = (DSET, '(GRAC|GRFO)', 'BA01', int(release))
        regex_pattern=r'{0}-2_\d+-\d+_{1}_UTCSR_{2}_0{3:d}00.gz$'.format(*args)
elif ((DSET == 'GSM') and (PROC == 'GFZ') and (DREL == 'RL04')):
#-- GFZ RL04: only unconstrained solutions (not GK2 products)
regex_pattern=r'{0}-2_\d+-\d+_\d+_EIGEN_G---_0004.gz$'.format(DSET)
elif ((DSET == 'GSM') and (PROC == 'GFZ') and (DREL == 'RL05')):
#-- GFZ RL05: updated RL05a products which are less constrained to
#-- the background model. Allow regularized fields
regex_unconst=r'{0}-2_\d+-\d+_\d+_EIGEN_G---_005a.gz$'.format(DSET)
regex_regular=r'{0}-2_\d+-\d+_\d+_EIGEN_GK2-_005a.gz$'.format(DSET)
regex_pattern=r'{0}|{1}'.format(regex_unconst,regex_regular)
elif ((DSET == 'GSM') and (PROC == 'GFZ') and (DREL == 'RL06')):
#-- GFZ GSM RL06: only monthly degree 60 products
release, = re.findall(r'\d+', DREL)
args = (DSET, '(GRAC|GRFO)', 'BA01', int(release))
        regex_pattern=r'{0}-2_\d+-\d+_{1}_GFZOP_{2}_0{3:d}00.gz$'.format(*args)
elif (PROC == 'JPL') and DREL in ('RL04','RL05'):
#-- JPL: RL04a and RL05a products (denoted by 0001)
release, = re.findall(r'\d+', DREL)
args = (DSET, int(release))
regex_pattern=r'{0}-2_\d+-\d+_\d+_JPLEM_0001_000{1:d}.gz$'.format(*args)
elif ((DSET == 'GSM') and (PROC == 'JPL') and (DREL == 'RL06')):
#-- JPL GSM RL06: only monthly degree 60 products
release, = re.findall(r'\d+', DREL)
args = (DSET, '(GRAC|GRFO)', 'BA01', int(release))
        regex_pattern=r'{0}-2_\d+-\d+_{1}_JPLEM_{2}_0{3:d}00.gz$'.format(*args)
else:
regex_pattern=r'{0}-2_(.*?).gz$'.format(DSET)
#-- return the compiled regular expression operator used to find files
return re.compile(regex_pattern, re.VERBOSE)
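#-- Hedged example (hypothetical filename, shown only to illustrate the pattern):
#--   rx = compile_regex_pattern('CSR', 'RL06', 'GSM')
#--   rx.match('GSM-2_2002095-2002120_GRAC_UTCSR_BA01_0600.gz')  # -> match object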
#-- PURPOSE: sync local GRACE/GRACE-FO files with JPL PO.DAAC drive server
def podaac_grace_sync(DIRECTORY,PROC,DREL=[],PROCESSES=0,TIMEOUT=360,RETRY=5,
LOG=False,MODE=None):
#-- check if directory exists and recursively create if not
os.makedirs(DIRECTORY,MODE) if not os.path.exists(DIRECTORY) else None
#-- remote https server for GRACE data
HOST = 'https://podaac-tools.jpl.nasa.gov'
#-- sync GSM datasets
DSET = 'GSM'
#-- compile HTML parser for lxml
parser = lxml.etree.HTMLParser()
#-- create log file with list of synchronized files (or print to terminal)
if LOG:
#-- format: PODAAC_sync.log
LOGFILE = 'PODAAC_sync.log'
logging.basicConfig(filename=os.path.join(DIRECTORY,LOGFILE),
level=logging.INFO)
logging.info('PO.DAAC Sync Log')
logging.info('CENTERS={0}'.format(','.join(PROC)))
logging.info('RELEASES={0}'.format(','.join(DREL)))
else:
#-- standard output (terminal output)
logging.basicConfig(level=logging.INFO)
#-- list of GRACE data files and modification times
remote_files = []
remote_mtimes = []
local_files = []
#-- SLR C2,0 COEFFICIENTS
PATH = [HOST,'drive','files','allData','grace','docs']
remote_dir = posixpath.join(*PATH)
local_dir = os.path.expanduser(DIRECTORY)
#-- compile regular expression operator for remote files
R1 = re.compile(r'TN-(05|07|11)_C20_SLR.txt', re.VERBOSE)
#-- open connection with PO.DAAC drive server at remote directory
files,mtimes = gravity_toolkit.utilities.drive_list(PATH,
timeout=TIMEOUT,build=False,parser=parser,pattern=R1,sort=True)
#-- for each file on the remote server
for colname,remote_mtime in zip(files,mtimes):
#-- remote and local versions of the file
remote_files.append(posixpath.join(remote_dir,colname))
remote_mtimes.append(remote_mtime)
local_files.append(os.path.join(local_dir,colname))
#-- SLR C3,0 COEFFICIENTS
PATH = [HOST,'drive','files','allData','gracefo','docs']
remote_dir = posixpath.join(*PATH)
local_dir = os.path.expanduser(DIRECTORY)
#-- compile regular expression operator for remote files
R1 = re.compile(r'TN-(14)_C30_C20_GSFC_SLR.txt', re.VERBOSE)
#-- open connection with PO.DAAC drive server at remote directory
files,mtimes = gravity_toolkit.utilities.drive_list(PATH,
timeout=TIMEOUT,build=False,parser=parser,pattern=R1,sort=True)
#-- for each file on the remote server
for colname,remote_mtime in zip(files,mtimes):
#-- remote and local versions of the file
remote_files.append(posixpath.join(remote_dir,colname))
remote_mtimes.append(remote_mtime)
local_files.append(os.path.join(local_dir,colname))
#-- GRACE DATA
#-- PROCESSING CENTERS (CSR, GFZ, JPL)
for pr in PROC:
PATH = [HOST,'drive','files','allData','grace']
#-- DATA RELEASES (RL06)
for rl in DREL:
#-- modifiers for intermediate data releases
if (pr == 'JPL') and (rl in ('RL04','RL05')):
#-- JPL RELEASE 4 = RL04.1
#-- JPL RELEASE 5 = RL05.1 (11/2014)
drel_str = '{0}.1'.format(rl)
else:
drel_str = rl
#-- remote directory for data release
PATH.extend(['L2',pr,drel_str])
remote_dir = posixpath.join(*PATH)
#-- open connection with PO.DAAC drive server at remote directory
colnames,mtimes = gravity_toolkit.utilities.drive_list(PATH,
timeout=TIMEOUT,build=False,parser=parser,sort=True)
#-- local directory for exact data product
local_dir = os.path.join(DIRECTORY, pr, rl, DSET)
#-- check if directory exists and recursively create if not
if not os.path.exists(local_dir):
os.makedirs(local_dir,MODE)
#-- compile regular expression operator to find GRACE files
R1 = re.compile(r'({0}-(.*?)(gz|txt|dif))'.format(DSET),re.VERBOSE)
line = [i for i,f in enumerate(colnames) if R1.match(f)]
#-- for each file on the remote server
for i in line:
#-- remote and local versions of the file
remote_files.append(posixpath.join(remote_dir,colnames[i]))
remote_mtimes.append(mtimes[i])
local_files.append(os.path.join(local_dir,colnames[i]))
#-- GRACE-FO DATA
#-- PROCESSING CENTERS (CSR, GFZ, JPL)
#-- GRACE-FO data are stored separately for each year
for pr in PROC:
PATH = [HOST,'drive','files','allData','gracefo']
#-- DATA RELEASES (RL06)
valid_gracefo_releases = [d for d in DREL if d not in ('RL04','RL05')]
for rl in valid_gracefo_releases:
#-- remote directory for data release
PATH.extend(['L2',pr,rl])
#-- open connection with PO.DAAC drive server at remote directory
R2 = re.compile(r'\d{4}',re.VERBOSE)
years,mtimes = gravity_toolkit.utilities.drive_list(PATH,
timeout=TIMEOUT,build=False,parser=parser,pattern=R2,sort=True)
for yr in years:
#-- add the year directory to the path
PATH.append(yr)
remote_dir = posixpath.join(*PATH)
#-- open connection with PO.DAAC drive server at remote directory
colnames,mtimes=gravity_toolkit.utilities.drive_list(PATH,
timeout=TIMEOUT,build=False,parser=parser,sort=True)
#-- local directory for exact data product
local_dir = os.path.join(DIRECTORY, pr, rl, DSET)
#-- check if directory exists and recursively create if not
if not os.path.exists(local_dir):
os.makedirs(local_dir,MODE)
#-- compile regular expression operator to find GRACE files
R1 = re.compile(r'({0}-(.*?)(gz|txt|dif))'.format(DSET))
line = [i for i,f in enumerate(colnames) if R1.match(f)]
#-- for each file on the remote server
for i in line:
#-- remote and local versions of the file
remote_files.append(posixpath.join(remote_dir,colnames[i]))
remote_mtimes.append(mtimes[i])
local_files.append(os.path.join(local_dir,colnames[i]))
                #-- remove the year directory from the path
PATH.remove(yr)
#-- sync in series if PROCESSES = 0
if (PROCESSES == 0):
#-- sync each GRACE/GRACE-FO data file
for i,remote_file in enumerate(remote_files):
#-- sync GRACE/GRACE-FO files with PO.DAAC Drive server
output = http_pull_file(remote_file, remote_mtimes[i],
local_files[i], TIMEOUT=TIMEOUT, RETRY=RETRY, MODE=MODE)
#-- print the output string
logging.info(output)
else:
#-- sync in parallel with multiprocessing Pool
pool = mp.Pool(processes=PROCESSES)
#-- sync each GRACE/GRACE-FO data file
out = []
for i,remote_file in enumerate(remote_files):
#-- sync GRACE/GRACE-FO files with PO.DAAC Drive server
args = (remote_file,remote_mtimes[i],local_files[i])
kwds = dict(TIMEOUT=TIMEOUT, RETRY=RETRY, MODE=MODE)
out.append(pool.apply_async(multiprocess_sync,args=args,kwds=kwds))
#-- start multiprocessing jobs
#-- close the pool
#-- prevents more tasks from being submitted to the pool
pool.close()
#-- exit the completed processes
pool.join()
#-- print the output string
for output in out:
logging.info(output.get())
#-- create index file for GRACE/GRACE-FO L2 Spherical Harmonic Data
#-- PROCESSING CENTERS (CSR, GFZ, JPL)
for pr in PROC:
#-- DATA RELEASES (RL06)
for rl in DREL:
#-- DATA PRODUCTS (GSM)
#-- local directory for exact data product
local_dir = os.path.join(DIRECTORY, pr, rl, DSET)
#-- Create an index file for each GRACE product
#-- finding all dataset files *.gz in directory
rx = compile_regex_pattern(pr, rl, DSET)
#-- find local GRACE files to create index
grace_files=[fi for fi in os.listdir(local_dir) if rx.match(fi)]
#-- outputting GRACE filenames to index
with open(os.path.join(local_dir,'index.txt'),'w') as fid:
for fi in sorted(grace_files):
print('{0}'.format(fi), file=fid)
#-- change permissions of index file
os.chmod(os.path.join(local_dir,'index.txt'), MODE)
#-- close log file and set permissions level to MODE
if LOG:
os.chmod(os.path.join(DIRECTORY,LOGFILE), MODE)
#-- PURPOSE: wrapper for running the sync program in multiprocessing mode
def multiprocess_sync(remote_file, remote_mtime, local_file,
TIMEOUT=0, RETRY=5, MODE=0o775):
try:
output = http_pull_file(remote_file,remote_mtime,local_file,
TIMEOUT=TIMEOUT,RETRY=RETRY,MODE=MODE)
except Exception as e:
#-- if there has been an error exception
#-- print the type, value, and stack trace of the
#-- current exception being handled
logging.critical('process id {0:d} failed'.format(os.getpid()))
logging.error(traceback.format_exc())
else:
return output
#-- PURPOSE: pull file from a remote host checking if file exists locally
#-- and if the remote file is newer than the local file
def http_pull_file(remote_file, remote_mtime, local_file,
TIMEOUT=0, RETRY=5, MODE=0o775):
#-- output string for printing files transferred
output = '{0} --> \n\t{1}\n'.format(remote_file,local_file)
#-- chunked transfer encoding size
CHUNK = 16 * 1024
#-- attempt to download up to the number of retries
retry_counter = 0
while (retry_counter < RETRY):
#-- attempt to retrieve file from https server
try:
#-- Create and submit request.
#-- There are a wide range of exceptions that can be thrown here
#-- including HTTPError and URLError.
request = gravity_toolkit.utilities.urllib2.Request(remote_file)
response = gravity_toolkit.utilities.urllib2.urlopen(request,
timeout=TIMEOUT)
#-- copy contents to local file using chunked transfer encoding
#-- transfer should work properly with ascii and binary formats
with open(local_file, 'wb') as f:
shutil.copyfileobj(response, f, CHUNK)
except:
pass
else:
break
#-- add to retry counter
retry_counter += 1
#-- check if maximum number of retries were reached
if (retry_counter == RETRY):
raise TimeoutError('Maximum number of retries reached')
#-- keep remote modification time of file and local access time
os.utime(local_file, (os.stat(local_file).st_atime, remote_mtime))
os.chmod(local_file, MODE)
#-- return the output string
return output
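#-- Hedged call sketch (hypothetical URL, modification time and destination path):
#--   http_pull_file('https://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/docs/TN-11_C20_SLR.txt',
#--       1609459200.0, '/tmp/TN-11_C20_SLR.txt', TIMEOUT=360, RETRY=5, MODE=0o775)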
#-- Main program that calls podaac_grace_sync()
def main():
#-- Read the system arguments listed after the program
parser = argparse.ArgumentParser(
description="""Syncs GRACE/GRACE-FO and auxiliary data from the
NASA JPL PO.DAAC Drive Server.
Gets the latest technical note (TN) files.
"""
)
#-- command line parameters
#-- NASA Earthdata credentials
parser.add_argument('--netrc','-N',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
default=os.path.join(os.path.expanduser('~'),'.netrc'),
help='Path to .netrc file for authentication')
#-- working data directory
parser.add_argument('--directory','-D',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
default=os.getcwd(),
help='Working data directory')
#-- number of processes to run in parallel
parser.add_argument('--np','-P',
metavar='PROCESSES', type=int, default=0,
help='Number of processes to run in parallel')
#-- GRACE/GRACE-FO processing center
parser.add_argument('--center','-c',
metavar='PROC', type=str, nargs='+',
default=['CSR','GFZ','JPL'], choices=['CSR','GFZ','JPL'],
help='GRACE/GRACE-FO processing center')
#-- GRACE/GRACE-FO data release
parser.add_argument('--release','-r',
metavar='DREL', type=str, nargs='+',
default=['RL06'], choices=['RL04','RL05','RL06'],
help='GRACE/GRACE-FO data release')
#-- connection timeout
parser.add_argument('--timeout','-t',
type=int, default=360,
help='Timeout in seconds for blocking operations')
#-- Output log file in form PODAAC_sync.log
parser.add_argument('--log','-l',
default=False, action='store_true',
help='Output log file')
#-- permissions mode of the directories and files synced (number in octal)
parser.add_argument('--mode','-M',
type=lambda x: int(x,base=8), default=0o775,
help='Permission mode of directories and files synced')
args = parser.parse_args()
#-- JPL PO.DAAC drive hostname
HOST = 'podaac-tools.jpl.nasa.gov'
#-- get NASA Earthdata and JPL PO.DAAC drive credentials
USER,_,PASSWORD = netrc.netrc(args.netrc).authenticators(HOST)
#-- build a urllib opener for PO.DAAC Drive
#-- Add the username and password for NASA Earthdata Login system
gravity_toolkit.utilities.build_opener(USER,PASSWORD)
#-- check internet connection before attempting to run program
#-- check JPL PO.DAAC Drive credentials before attempting to run program
if gravity_toolkit.utilities.check_credentials():
podaac_grace_sync(args.directory, args.center, DREL=args.release,
PROCESSES=args.np, TIMEOUT=args.timeout, LOG=args.log,
MODE=args.mode)
#-- run main program
if __name__ == '__main__':
main()
|
py | b406708bbe0c0655cc80f9b6aa3e48d47ec7ddc8 | mod_ds = """Python wrapper for reactor parameters."""
class_ds = \
"""This data structure is a set of physical reactor parameters. It may
be used to instantiate new reactor objects **OR** to define default
settings for a reactor type. The data stored in this class is copied
over to a reactor instance in the initialize() method. However, the
attributes of this objects take on more natural names than their reactor
attribute analogies. This is because it is this object that Bright users
will more often be interacting with.
"""
desc = {
'docstrings': {
'class': class_ds,
'attrs': {},
},
'attrs': {},
'extra': {},
}
mod = {'ReactorParameters': desc,
'docstring': mod_ds,}
desc['docstrings']['attrs']['batches'] = \
"""This is the total number of batches (int) in the fuel management scheme.
This is typically indexed by b."""
desc['docstrings']['attrs']['flux'] = \
"""The nominal flux value (float) that the library for this reactor type was
generated with. Often used to correctly weight batch-specific fluxes."""
desc['docstrings']['attrs']['fuel_form'] = \
"""This is the chemical form of fuel as a dictionary or other mapping. Keys are
often strings that represent isotopes while values represent the corresponding
mass weights. The heavy metal concentration is given by the key "IHM".
This will automatically fill in the nuclides in mat_feed for the "IHM" weight.
For example, LWRs typically use a UOX fuel form::
ReactorParameters.fuel_form = {"IHM": 1.0, "O16": 2.0}
"""
desc['docstrings']['attrs']['cladding_form'] = \
"""This is the chemical form of cladding as a dictionary or other mapping.
This uses the same notation as fuel_form except that "IHM" is no longer
a valid key. Cladding is often made from some zircalloy.
"""
desc['docstrings']['attrs']['coolant_form'] = \
"""This is the chemical form of coolant as a dictionary or other mapping.
This uses the same notation as fuel_form except that "IHM" is no longer
a valid key. The term 'coolant' is used in preference over the term
'moderator' because not all reactors moderate neutrons. For example,
LWRs often cool the reactor core with borated water::
ReactorParamters.coolant_form = {}
ReactorParamters.coolant_form["H1"] = 2.0
ReactorParamters.coolant_form["O16"] = 1.0
ReactorParamters.coolant_form["B10"] = 0.199 * 550 * 10.0**-6
ReactorParamters.coolant_form["B11"] = 0.801 * 550 * 10.0**-6
"""
desc['docstrings']['attrs']['fuel_density'] = \
"""The fuel region density. A float in units of [g/cm^3]."""
desc['docstrings']['attrs']['cladding_density'] = \
"""The cladding region density. A float in units of [g/cm^3]."""
desc['docstrings']['attrs']['coolant_density'] = \
"""The coolant region density. A float in units of [g/cm^3]."""
desc['docstrings']['attrs']['pnl'] = \
"""The reactor's non-leakage probability (float). This is often used as a
calibration parameter."""
desc['docstrings']['attrs']['BUt'] = \
"""The reactor's target discharge burnup (float). This is given
in units of [MWd/kgIHM]. Often the actual discharge burnup BUd does not
quite hit this value, but comes acceptably close."""
desc['docstrings']['attrs']['specific_power'] = \
"""The specific power of the fuel (float) in units of [MW/kgIHM]"""
desc['docstrings']['attrs']['burn_regions'] = \
"""Number of annular burn regions (int)."""
desc['docstrings']['attrs']['burn_times'] = \
"""A non-negative, monotonically increasing numpy float array (C++ vector<double>)
of burnup times [days]."""
desc['docstrings']['attrs']['use_disadvantage_factor'] = \
"""Boolaean to determine whether the thermal disadvantage factor is employed or not.
LWRs typically set this as True while FRs have a False value."""
desc['docstrings']['attrs']['lattice_type'] = \
"""Flag (str) that represents what lattice type the fuel assemblies are arranged in.
Currently accepted values are "Planar", "Spherical", and "Cylindrical"."""
desc['docstrings']['attrs']['rescale_hydrogen'] = \
"""Boolean to determine whether the reactor should rescale the Hydrogen-1 destruction
rate in the coolant as a function of fluence. The scaling factor is calculated via
the following equation
.. math:: f(F) = 1.36927 - 0.01119 \cdot BU(F)
This is typically not done for fast reactors but is a useful correction for LWRs.
"""
desc['docstrings']['attrs']['burnup_via_constant'] = \
"""Flag (str) for constant "flux" or "power" calculations."""
desc['docstrings']['attrs']['branch_ratio_cutoff'] = \
"""The cutoff value (float) below which the bateman equations are not solved."""
desc['docstrings']['attrs']['fuel_radius'] = \
"""The radius (float) of the fuel region [cm]."""
desc['docstrings']['attrs']['void_radius'] = \
"""The radius (float) of the void region [cm]."""
desc['docstrings']['attrs']['clad_radius'] = \
"""The radius (float) of the cladding region [cm]."""
desc['docstrings']['attrs']['unit_cell_pitch'] = \
"""The pitch or length (float) of the unit fuel pin cell [cm]."""
desc['docstrings']['attrs']['open_slots'] = \
"""The number of slots (float) in a fuel assembly that are open. Thus this is the
number of slots that do not contain a fuel pin and are instead filled in by coolant.
"""
desc['docstrings']['attrs']['total_slots'] = \
"""The total number of fuel pin slots (float) in a fuel assembly.
For a 17x17 bundle this is 289.0.
"""
desc['extra']['cpppxd'] = \
""" ReactorParameters fill_lwr_defaults() except +
ReactorParameters fill_fr_defaults() except +"""
desc['extra']['pyx'] = \
'''def lwr_defaults():
"""This function returns a copy of the LWR default presets. These are applicable to most cases.
However, if you want to use your own LWR parameters, it is recommended you use this function
and then only change the necessary attributes.
Returns
-------
lwrd : ReactorParameters
Light water reactor default parameters.
Warnings
--------
Note that the target burnup default value is zero. Generally, at least this value should be overridden.
"""
cdef cpp_reactor_parameters.ReactorParameters cpp_lwrd = cpp_reactor_parameters.fill_lwr_defaults()
cdef ReactorParameters lwrd = ReactorParameters()
(<cpp_reactor_parameters.ReactorParameters *> lwrd._inst)[0] = cpp_lwrd
return lwrd
def fr_defaults():
"""This function returns a copy of the FR default presets. These are applicable to most cases.
However, if you want to use your own FR parameters, it is recommended you use this function
and then only change the necessary attributes.
Returns
-------
frd : ReactorParameters
Fast reactor default parameters.
Warnings
--------
Note that the target burnup default value is zero. Generally, at least this value should be overridden.
"""
cdef cpp_reactor_parameters.ReactorParameters cpp_frd = cpp_reactor_parameters.fill_fr_defaults()
cdef ReactorParameters frd = ReactorParameters()
(<cpp_reactor_parameters.ReactorParameters *> frd._inst)[0] = cpp_frd
return frd
'''
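# Hedged usage sketch of the generated wrappers (hypothetical import path):
#     from bright import reactor_parameters as rp
#     lwrd = rp.lwr_defaults()
#     lwrd.BUt = 50.0   # override the zero default target burnup noted above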
|
py | b40670d539e9e5ab1f7a1d6e40e31b23764f6007 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1beta1.types import index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
from google.longrunning import operations_pb2 # type: ignore
from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import IndexEndpointServiceGrpcTransport
class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport):
"""gRPC AsyncIO backend transport for IndexEndpointService.
A service for managing Vertex AI's IndexEndpoints.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.CreateIndexEndpointRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the create index endpoint method over gRPC.
Creates an IndexEndpoint.
Returns:
Callable[[~.CreateIndexEndpointRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_index_endpoint" not in self._stubs:
self._stubs["create_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint",
request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_index_endpoint"]
@property
def get_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.GetIndexEndpointRequest],
Awaitable[index_endpoint.IndexEndpoint],
]:
r"""Return a callable for the get index endpoint method over gRPC.
Gets an IndexEndpoint.
Returns:
Callable[[~.GetIndexEndpointRequest],
Awaitable[~.IndexEndpoint]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_index_endpoint" not in self._stubs:
self._stubs["get_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint",
request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize,
response_deserializer=index_endpoint.IndexEndpoint.deserialize,
)
return self._stubs["get_index_endpoint"]
@property
def list_index_endpoints(
self,
) -> Callable[
[index_endpoint_service.ListIndexEndpointsRequest],
Awaitable[index_endpoint_service.ListIndexEndpointsResponse],
]:
r"""Return a callable for the list index endpoints method over gRPC.
Lists IndexEndpoints in a Location.
Returns:
Callable[[~.ListIndexEndpointsRequest],
Awaitable[~.ListIndexEndpointsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_index_endpoints" not in self._stubs:
self._stubs["list_index_endpoints"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints",
request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize,
response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize,
)
return self._stubs["list_index_endpoints"]
@property
def update_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.UpdateIndexEndpointRequest],
Awaitable[gca_index_endpoint.IndexEndpoint],
]:
r"""Return a callable for the update index endpoint method over gRPC.
Updates an IndexEndpoint.
Returns:
Callable[[~.UpdateIndexEndpointRequest],
Awaitable[~.IndexEndpoint]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_index_endpoint" not in self._stubs:
self._stubs["update_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint",
request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize,
response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize,
)
return self._stubs["update_index_endpoint"]
@property
def delete_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.DeleteIndexEndpointRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the delete index endpoint method over gRPC.
Deletes an IndexEndpoint.
Returns:
Callable[[~.DeleteIndexEndpointRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_index_endpoint" not in self._stubs:
self._stubs["delete_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint",
request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_index_endpoint"]
@property
def deploy_index(
self,
) -> Callable[
[index_endpoint_service.DeployIndexRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the deploy index method over gRPC.
Deploys an Index into this IndexEndpoint, creating a
DeployedIndex within it.
Only non-empty Indexes can be deployed.
Returns:
Callable[[~.DeployIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "deploy_index" not in self._stubs:
self._stubs["deploy_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex",
request_serializer=index_endpoint_service.DeployIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["deploy_index"]
@property
def undeploy_index(
self,
) -> Callable[
[index_endpoint_service.UndeployIndexRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the undeploy index method over gRPC.
Undeploys an Index from an IndexEndpoint, removing a
DeployedIndex from it, and freeing all resources it's
using.
Returns:
Callable[[~.UndeployIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "undeploy_index" not in self._stubs:
self._stubs["undeploy_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex",
request_serializer=index_endpoint_service.UndeployIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["undeploy_index"]
def close(self):
return self.grpc_channel.close()
__all__ = ("IndexEndpointServiceGrpcAsyncIOTransport",)
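# A minimal construction sketch (async context assumed; if no credentials are
# passed, they are resolved from the environment as described in __init__):
#     transport = IndexEndpointServiceGrpcAsyncIOTransport(
#         host="aiplatform.googleapis.com",
#     )
#     channel = transport.grpc_channel   # cached aio.Channel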
|
py | b406712457fc6db60c234e6e759899f4e1c1d809 | MacRomanEncoding = (
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, 'space', 'exclam',
'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand',
'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma',
'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four',
'five', 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less',
'equal', 'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F',
'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', 'bracketright',
'asciicircum', 'underscore', 'grave', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright',
'asciitilde', None, 'Adieresis', 'Aring', 'Ccedilla', 'Eacute',
'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex',
'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave',
'ecircumflex', 'edieresis', 'iacute', 'igrave', 'icircumflex',
'idieresis', 'ntilde', 'oacute', 'ograve', 'ocircumflex', 'odieresis',
'otilde', 'uacute', 'ugrave', 'ucircumflex', 'udieresis', 'dagger',
'degree', 'cent', 'sterling', 'section', 'bullet', 'paragraph',
'germandbls', 'registered', 'copyright', 'trademark', 'acute',
'dieresis', None, 'AE', 'Oslash', None, 'plusminus', None, None, 'yen',
'mu', None, None, None, None, None, 'ordfeminine', 'ordmasculine', None,
'ae', 'oslash', 'questiondown', 'exclamdown', 'logicalnot', None, 'florin',
None, None, 'guillemotleft', 'guillemotright', 'ellipsis', 'space', 'Agrave',
'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash', 'quotedblleft',
'quotedblright', 'quoteleft', 'quoteright', 'divide', None, 'ydieresis',
'Ydieresis', 'fraction', 'currency', 'guilsinglleft', 'guilsinglright',
'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase',
'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute',
'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave',
'Oacute', 'Ocircumflex', None, 'Ograve', 'Uacute', 'Ucircumflex',
'Ugrave', 'dotlessi', 'circumflex', 'tilde', 'macron', 'breve',
'dotaccent', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron')
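# Hedged helper (not part of the original table): look up the PostScript glyph
# name for a Mac Roman byte value, e.g. 32 -> 'space'; unmapped slots are None.
def macroman_glyph_name(byte_value):
    # Guard against out-of-range input rather than raising IndexError.
    if 0 <= byte_value < len(MacRomanEncoding):
        return MacRomanEncoding[byte_value]
    return None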
|
py | b406713f76da1c903c2a7b740301524e9dfa07f9 | """Movie-meta
Movie-meta allows one to generate movie metadata based on the contents of a directory.
It generates movie parameters from the public IMDB (OMDb) API and helps you sort movies
based on ratings, genre, title, etc.
@TODO:
1. Automated duplicacy detection.
2. Path watch module. Generate meta whenever new content in existing dir
is added.
3. Movie suggestions based on most viewed movie by current user.
4. skip processed ones (cache)
"""
import os,sys,time
import json
import requests
import logging
from guessit import guessit
from threading import Thread
from Queue import Queue
from argparse import ArgumentParser
class Threader:
"""Threader class.
Threader class is responsible for making multiple parallel requests
"""
def __init__(self, num_threads=5):
self.concurrent = num_threads
self.queue = Queue(num_threads * 2)
self.obj = None
def attach(self, obj):
self.obj = obj
def job(self):
while True:
url = self.queue.get()
if self.obj is not None:
response = self.obj.worker(url)
self.result(response)
self.queue.task_done()
def result(self,response):
try:
self.obj.result(response)
except Exception:
print "Exception occured"
def start(self):
for i in range(self.concurrent):
t = Thread(target=self.job)
t.daemon = True
t.start()
def submit(self):
try:
self.obj.prepare(self.queue)
except KeyboardInterrupt:
sys.exit(1)
class MovieMeta:
"""MovieMeta class.
MovieMeta class is the main entry point for the whole program.
It is responsible for getting api response, as well as generating
sort table.
"""
def __init__(self, current_dir="", log=False):
self.currentDir = current_dir
#Constants
self.SUBDIR_SEP = ","
self.SUBDIR_FILE = "subdir.txt"
self.URL_TITLE = 'http://www.omdbapi.com/?t={0}'
self.URL_TITLE_YEAR = 'http://www.omdbapi.com/?t={0}&y={1}'
#Arrays and stuffs
self.movieJsonArr = []
self.movieArr = []
self.imdbJSON = []
self.subdirs = []
self.makelog = log
self.logger = None
self.start_time = 0
#Init stuff
self._processed = False
if len(self.currentDir) == 0:
self.currentDir = os.getcwd()
subdir = ''
if os.path.isfile(self.SUBDIR_FILE):
            with open(self.SUBDIR_FILE) as f:
                subdir = f.readline()
self.subdir(subdir)
self.debug(log)
def subdir(self, subdirs = ""):
self.subdirs = str(subdirs).split(self.SUBDIR_SEP)
def debug(self, log):
self.makelog = log
if log:
FORMAT = '%(asctime)-8s %(levelname)s : %(message)s'
self.logger = logging.getLogger("moviemeta")
logging.basicConfig(filename='moviemeta.log',level=logging.DEBUG,format=FORMAT)
def makeJSON (self):
for movieObj in self.movieArr:
mov = {}
mov["title"] = movieObj.get("title") if movieObj.get("title") is not None else None
mov["year"] = movieObj.get("year") if movieObj.get("year") is not None else None
self.movieJsonArr.append(mov)
self.movieJSON = json.dumps(self.movieJsonArr)
self._processed = True
if self.makelog:
self.logger.info('Finished local directory parsing...')
return self.movieJSON
def _walk(self,top):
""" Our custom walker , see os.walk()"""
try:
names = os.listdir(top)
except os.error:
return
for name in names:
if self.subdirs.count(name) > 0:
name = os.path.join(top, name)
if os.path.isdir(name):
self._walk(name)
else:
self.movieArr.append( guessit(name) )
def _process(self):
#only proceed if request is not already honored
if not self._processed:
if self.makelog:
self.logger.info('Starting local directory parsing...')
self._walk( current_dir )
self.makeJSON()
def get(self,json=False):
self._process()
return self.movieJSON if json else self.movieJsonArr
def writeFile(self,filename="moviemeta.txt"):
f = open(filename, "w")
f.write(json.dumps(self.imdbJSON))
f.close()
def getResult(self):
return json.dumps(self.imdbJSON)
def prepare(self,queue):
movies = self.get()
for movie in movies:
url = self.getURL(movie)
queue.put({'title':movie["title"],'url':url})
queue.join()
def result(self,response):
self.imdbJSON.append(response)
def worker(self,obj):
if self.makelog:
self.logger.info('Fetching movie data: %s...', obj['title'])
try:
fetchedDetails = requests.get(obj['url'])
details = fetchedDetails.content
movieJSON = self.parseIMDBResponse( obj['title'], json.loads(details) )
return movieJSON
#catch timeouts and report
except requests.exceptions.Timeout:
if self.makelog:
self.logger.warning('Error fetching movie data: %s [ERR_TIMEOUT]', obj['title'])
return {"Response":False,"Error":"err_timeout"}
#catch connection breaks and report
except requests.exceptions.ConnectionError:
if self.makelog:
self.logger.warning('Error fetching movie data: %s [ERR_CONNECT]', obj['title'])
return {"Response":False,"Error":"err_connect"}
def getURL(self,movie):
url = ""
if movie["title"] is not None:
url = self.URL_TITLE.format(movie["title"])
if movie["year"] is not None:
url = self.URL_TITLE_YEAR.format(movie["title"],movie["year"])
return url
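    # e.g. getURL({'title': 'Inception', 'year': 2010}) returns
    #      'http://www.omdbapi.com/?t=Inception&y=2010' (per URL_TITLE_YEAR above)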
def getIMDB(self, jsonify = True):
self.start_time = time.time()
self._process()
if not self.movieJsonArr:
if self.makelog:
self.logger.info('No movies to fetch')
return
for movie in self.movieJsonArr:
url = self.getURL(movie)
movieJSON = self.worker({'title':movie["title"],'url':url})
self.imdbJSON.append(movieJSON)
end_time = time.time() - self.start_time
if self.makelog:
self.logger.info('All data fetched in %.3f s', end_time)
self.start_time = 0
return json.dumps(self.imdbJSON) if jsonify else self.imdbJSON
def parseIMDBResponse( self, title, resp ):
IMDBResp = {}
IMDBResp["movieTitle"] = title
IMDBResp["movieYear"] = ''
IMDBResp["movieRuntime"] = ''
IMDBResp["movieGenre"] = ''
IMDBResp["moviePlot"] = ''
IMDBResp["movieMeta"] = ''
IMDBResp["movieImdb"] = ''
IMDBResp["movieAwards"] = ''
if resp['Response'] == "False":
#report to error log
return IMDBResp
if resp['Title']:
IMDBResp["movieTitle"] = resp['Title'].encode('utf-8').replace('"','\\"')
if resp['Year']:
IMDBResp["movieYear"] = resp['Year'].encode('utf-8').replace('"','\\"')
if (resp['Runtime']):
IMDBResp["movieRuntime"] = "" if resp['Runtime'] == "N/A" else resp['Runtime'].encode('utf-8').replace('"','\\"')
if resp['Genre']:
IMDBResp["movieGenre"] = resp['Genre'].encode('utf-8').replace('"','\\"')
if resp['Plot']:
IMDBResp["moviePlot"] = resp['Plot'].encode('utf-8').replace('"','\\"')
if resp['Metascore'] == "N/A":
IMDBResp["movieMeta"] = 0
elif resp['Metascore']:
IMDBResp["movieMeta"] = resp['Metascore'].encode('utf-8').replace('"','\\"')
if resp['imdbRating']:
IMDBResp["movieImdb"] = 0 if resp['imdbRating'] == "N/A" else resp['imdbRating'].encode('utf-8').replace('"','\\"')
if resp['Awards']:
IMDBResp["movieAwards"] = resp['Awards'].encode('utf-8').replace('"','\\"')
return IMDBResp
if __name__ == "__main__":
#Check if external arguments are passed
parser = ArgumentParser(description='Movie meta generator')
parser.add_argument("-d", help="Directory path")
parser.add_argument("--log", action='store_true', help="log behaviour")
parser.add_argument("-s", action='store_true', help="Use in sequential mode")
args = parser.parse_args()
current_dir = ""
if getattr(args, 'd') is not None:
current_dir = str(getattr(args, 'd'))
sequential = getattr(args, 's')
#create a MovieMeta object to get movies from mentioned directory
movies = MovieMeta( current_dir )
#Use logger? (True/False)
movies.debug(getattr(args, 'log'))
if sequential:
"""
This method makes requests serially
"""
movies.getIMDB()
else:
"""
use this method to make parallel requests.
5 concurrent requests are enough. Make it more if you have
really large movie collection in single directory.
Optionally, you can go into multicore CPU pooling,
but its not really required here.
"""
start_time = time.time()
threader = Threader(5)
threader.attach(movies)
threader.start()
threader.submit()
#Measure your elapsed time after all threads finished execution
end_time = time.time() - start_time
print "Finished in %.3f s" % end_time
#Write the JSON string to file
movies.writeFile()
"""At this point, you should have required JSON"""
#print movies.getResult()
|
py | b406720034125154b9be96935903805e2329add5 | # Copyright (c) 2014-2019, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""iocage plugin module"""
import collections
import concurrent.futures
import contextlib
import datetime
import distutils.dir_util
import json
import logging
import os
import git
import pathlib
import re
import shutil
import subprocess as su
import requests
import urllib.parse
import uuid
import iocage_lib.ioc_common
import iocage_lib.ioc_create
import iocage_lib.ioc_destroy
import iocage_lib.ioc_exec
import iocage_lib.ioc_json
import iocage_lib.ioc_upgrade
import iocage_lib.ioc_exceptions
import libzfs
import texttable
class IOCPlugin(object):
"""
This is responsible for the general life cycle of a plugin jail. This
includes creation, updating and upgrading.
"""
PLUGIN_VERSION = '2'
DEFAULT_PROPS = {
'vnet': 1,
'boot': 1
}
def __init__(
self, release=None, jail=None, plugin=None, branch=None,
keep_jail_on_failure=False, callback=None, silent=False, **kwargs
):
self.pool = iocage_lib.ioc_json.IOCJson().json_get_value("pool")
self.iocroot = iocage_lib.ioc_json.IOCJson(
self.pool).json_get_value("iocroot")
self.zfs = libzfs.ZFS(history=True, history_prefix="<iocage>")
self.release = release
if os.path.exists(plugin or ''):
self.plugin_json_path = plugin
            # rstrip('.json') strips a character set, not a suffix; trim it explicitly
            plugin = re.sub(r'\.json$', '', plugin.rsplit('/', 1)[-1])
if self.plugin_json_path == jail:
# If user specified a complete path to plugin json file
# jail would be having the same value. We ensure that we don't
# do that here.
jail = f'{plugin}_{str(uuid.uuid4())[:4]}'
else:
self.plugin_json_path = None
self.plugin = plugin
self.jail = jail
self.http = kwargs.pop("http", True)
self.hardened = kwargs.pop("hardened", False)
self.date = datetime.datetime.utcnow().strftime("%F")
self.branch = branch
self.silent = silent
self.callback = callback
self.keep_jail_on_failure = keep_jail_on_failure
self.thickconfig = kwargs.pop('thickconfig', False)
self.log = logging.getLogger('iocage')
# If we have a jail which exists for this plugin, we will like to
# enforce the plugin to respect the github repository it was
# created from for updates/upgrades etc. If for some reason, this
# is not desired, the user is free to change it via "set" manually
# on his own.
# TODO: For a lack of ability to do this efficiently/correctly here,
# the above should be enforced by the caller of IOCPlugin
self.git_repository = kwargs.get(
'git_repository'
) or 'https://github.com/freenas/iocage-ix-plugins.git'
self.git_destination = kwargs.get('git_destination')
if not self.git_destination:
# If not provided, we use git repository uri and split on scheme
# and convert slashes/dot to underscore to guarantee uniqueness
# i.e github_com_freenas_iocage-ix-plugins_git
self.git_destination = os.path.join(
self.iocroot, '.plugins', self.git_repository.split(
'://', 1)[-1].replace('/', '_').replace('.', '_')
)
if self.branch is None and not self.hardened:
freebsd_version = su.run(['freebsd-version'],
stdout=su.PIPE,
stderr=su.STDOUT)
r = freebsd_version.stdout.decode().rstrip().split('-', 1)[0]
self.branch = f'{r}-RELEASE' if '.' in r else f'{r}.0-RELEASE'
elif self.branch is None and self.hardened:
# Backwards compat
self.branch = 'master'
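    # Hedged construction sketch (hypothetical names; an activated iocage pool
    # is assumed, since __init__ reads it via IOCJson above):
    #     p = IOCPlugin(plugin='plexmediaserver', jail='plex',
    #                   branch='13.1-RELEASE')
    #     p.fetch_plugin(props=[], num=0, accept_license=True)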
def pull_clone_git_repo(self, depth=None):
self._clone_repo(
self.branch, self.git_repository, self.git_destination,
depth, self.callback
)
@staticmethod
def fetch_plugin_packagesites(package_sites):
def download_parse_packagesite(packagesite_url):
package_site_data = {}
try:
response = requests.get(f'{packagesite_url}/All/', timeout=20)
response.raise_for_status()
for pkg in re.findall(r'<a.*>\s*(\S+).txz</a>', response.text):
package_site_data[pkg.rsplit('-', 1)[0]] = \
iocage_lib.ioc_common.parse_package_name(pkg)
except Exception:
pass
return packagesite_url, package_site_data
plugin_packagesite_mapping = {}
package_sites = set([
url.rstrip('/') for url in package_sites
])
with concurrent.futures.ThreadPoolExecutor() as exc:
results = exc.map(
download_parse_packagesite, package_sites
)
for result in results:
plugin_packagesite_mapping[result[0]] = result[1]
return plugin_packagesite_mapping
@staticmethod
def fetch_plugin_versions_from_plugin_index(plugins_index):
plugin_packagesite_mapping = IOCPlugin.fetch_plugin_packagesites([
v['packagesite'] for v in plugins_index.values()
])
version_dict = {}
for plugin in plugins_index:
plugin_dict = plugins_index[plugin]
packagesite = plugin_dict['packagesite']
primary_package = plugin_dict.get('primary_pkg') or plugin
packagesite = packagesite.rstrip('/')
plugin_pkgs = plugin_packagesite_mapping[packagesite]
try:
version_data = plugin_pkgs[primary_package]
except KeyError:
plugin_dict.update({
k: 'N/A' for k in ('revision', 'version', 'epoch')
})
else:
plugin_dict.update(version_data)
version_dict[plugin] = plugin_dict
return version_dict
@staticmethod
def retrieve_plugin_index_data(plugin_index_path):
with open(
os.path.join(plugin_index_path, 'INDEX'), 'r'
) as f:
index = json.loads(f.read())
plugin_index = {}
for plugin in index:
plugin_index[plugin] = {
'primary_pkg': index[plugin].get('primary_pkg'),
}
with open(
os.path.join(plugin_index_path, index[plugin]['MANIFEST']), 'r'
) as f:
plugin_index[plugin].update(json.loads(f.read()))
return plugin_index
def fetch_plugin_versions(self):
self.pull_clone_git_repo()
plugin_index = self.retrieve_plugin_index_data(self.git_destination)
return self.fetch_plugin_versions_from_plugin_index(plugin_index)
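    # Hedged usage sketch (requires network access to the git repository and
    # plugin package sites; the plugin name below is hypothetical):
    #     versions = IOCPlugin(branch='13.1-RELEASE').fetch_plugin_versions()
    #     versions['plexmediaserver']['version']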
def retrieve_plugin_json(self):
if not self.plugin_json_path:
_json = os.path.join(self.git_destination, f'{self.plugin}.json')
if not os.path.exists(self.git_destination):
self.pull_clone_git_repo()
else:
_json = self.plugin_json_path
self.log.debug(f'Plugin json file path: {_json}')
try:
with open(_json, 'r') as j:
conf = json.load(j)
except FileNotFoundError:
iocage_lib.ioc_common.logit(
{
'level': 'EXCEPTION',
'message': f'{_json} was not found!'
},
_callback=self.callback
)
except json.decoder.JSONDecodeError:
iocage_lib.ioc_common.logit(
{
'level': 'EXCEPTION',
'message': 'Invalid JSON file supplied, please supply a '
'correctly formatted JSON file.'
},
_callback=self.callback
)
return conf
def fetch_plugin(self, props, num, accept_license):
"""Helper to fetch plugins"""
plugins = self.fetch_plugin_index(props, index_only=True)
conf = self.retrieve_plugin_json()
if self.hardened:
conf['release'] = conf['release'].replace("-RELEASE", "-STABLE")
conf['release'] = re.sub(r"\W\w.", "-", conf['release'])
self.release = conf['release']
props, pkg = self.__fetch_plugin_props__(conf, props, num)
self.__fetch_plugin_inform__(conf, num, plugins, accept_license)
location = f"{self.iocroot}/jails/{self.jail}"
try:
devfs = conf.get("devfs_ruleset", None)
if devfs is not None:
plugin_devfs = devfs[f'plugin_{self.jail}']
plugin_devfs_paths = plugin_devfs['paths']
for prop in props:
key, _, value = prop.partition("=")
if key == 'dhcp' and iocage_lib.ioc_common.check_truthy(
value
):
if 'bpf*' not in plugin_devfs_paths:
plugin_devfs_paths["bpf*"] = None
plugin_devfs_includes = None if 'includes' not in plugin_devfs\
else plugin_devfs['includes']
iocage_lib.ioc_common.generate_devfs_ruleset(
self.conf,
paths=plugin_devfs_paths,
includes=plugin_devfs_includes
)
jaildir, _conf, repo_dir = self.__fetch_plugin_create__(props)
self.__fetch_plugin_install_packages__(
jaildir, conf, pkg, props, repo_dir
)
self.__fetch_plugin_post_install__(conf, _conf, jaildir)
except BaseException as e:
if not self.keep_jail_on_failure:
msg = f'{self.jail} had a failure\n' \
f'Exception: {e.__class__.__name__} ' \
f'Message: {str(e)}\n' \
f'Partial plugin destroyed'
iocage_lib.ioc_destroy.IOCDestroy().destroy_jail(location)
iocage_lib.ioc_common.logit({
'level': 'EXCEPTION',
'message': msg
},
_callback=self.callback,
silent=self.silent)
raise
def __fetch_plugin_inform__(self, conf, num, plugins, accept_license):
"""Logs the pertinent information before fetching a plugin"""
if num <= 1:
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": f"Plugin: {conf['name']}"
},
_callback=self.callback,
silent=self.silent)
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message":
f" Official Plugin: {conf.get('official', False)}"
},
_callback=self.callback,
silent=self.silent)
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": f" Using RELEASE: {conf['release']}"
},
_callback=self.callback,
silent=self.silent)
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": f" Using Branch: {self.branch}"
},
_callback=self.callback,
silent=self.silent)
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": f" Post-install Artifact: {conf['artifact']}"
},
_callback=self.callback,
silent=self.silent)
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": " These pkgs will be installed:"
},
_callback=self.callback,
silent=self.silent)
for pkg in conf["pkgs"]:
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": f" - {pkg}"
},
_callback=self.callback,
silent=self.silent)
# Name would be convenient, but it doesn't always gel with the
# JSON's title, pkg always does.
try:
license = plugins[pkg.split("/", 1)[-1]].get("license", False)
except UnboundLocalError:
license = plugins.get(
conf["name"].lower().split("/", 1)[-1],
conf
).get("license", False)
except KeyError:
# quassel-core is one that does this.
license = plugins.get(
conf["name"].strip("-").lower().split("/", 1)[-1],
conf
).get("license", False)
if license and not accept_license:
license_text = requests.get(license)
iocage_lib.ioc_common.logit(
{
"level": "WARNING",
"message":
" This plugin requires accepting a license "
"to proceed:"
},
_callback=self.callback,
silent=self.silent)
iocage_lib.ioc_common.logit(
{
"level": "VERBOSE",
"message": f"{license_text.text}"
},
_callback=self.callback,
silent=self.silent)
agree = input("Do you agree? (y/N) ")
if agree.lower() != "y":
iocage_lib.ioc_common.logit(
{
"level": "EXCEPTION",
"message":
"You must accept the license to continue!"
},
_callback=self.callback)
def __fetch_plugin_props__(self, conf, props, num):
"""Generates the list of properties that a user and the JSON supply"""
self.release = conf["release"]
pkg_repos = conf["fingerprints"]
freebsd_version = f"{self.iocroot}/releases/{conf['release']}" \
"/root/bin/freebsd-version"
json_props = conf.get("properties", {})
truthy_inverse = iocage_lib.ioc_common.truthy_inverse_values()
props = {p.split('=')[0]: p.split('=')[1] for p in list(props)}
network_props = {
'nat': truthy_inverse, 'dhcp': truthy_inverse,
            'ip4_addr': ('none',), 'ip6_addr': ('none',)
}
for p, v in json_props.items():
            # The JSON properties are going to be treated as user-entered
            # ones on the command line. If the user's prop exists on the
            # command line, we will skip the JSON one.
if p not in props:
if p in network_props and v not in network_props[p]:
# This means that "p" is enabled in the plugin manifest
# We should now ensure that we don't have any other
# connectivity option enabled
network_props.pop(p)
if any(
nk in props and props[nk] not in nv
for nk, nv in network_props.items()
):
# This means that some other network option has
# been specified which is enabled and we don't want
# to add the plugin manifest default
continue
props[p] = v
if not os.path.isdir(f"{self.iocroot}/releases/{self.release}"):
iocage_lib.ioc_common.check_release_newer(
self.release, self.callback, self.silent)
self.__fetch_release__(self.release)
if conf["release"][:4].endswith("-"):
# 9.3-RELEASE and under don't actually have this binary.
release = conf["release"]
else:
iocage_lib.ioc_common.check_release_newer(
self.release, self.callback, self.silent)
try:
with open(
freebsd_version, mode='r', encoding='utf-8'
) as r:
for line in r:
if line.startswith("USERLAND_VERSION"):
release = line.rstrip().partition("=")[2].strip(
'"')
except FileNotFoundError:
iocage_lib.ioc_common.logit(
{
"level": "WARNING",
"message": f"Release {self.release} missing, "
f"will attempt to fetch it."
},
_callback=self.callback,
silent=self.silent)
self.__fetch_release__(self.release)
# We still want this.
with open(
freebsd_version, mode='r', encoding='utf-8'
) as r:
for line in r:
if line.startswith("USERLAND_VERSION"):
release = line.rstrip().partition("=")[2].strip(
'"')
        # We set the properties that we need, and then iterate over the
        # user-supplied properties, replacing ours.
create_props = [f'release={release}'] + [
f'{k}={v}' for k, v in {**self.DEFAULT_PROPS, **props}.items()
]
if all(
props.get(k, 'none') == 'none'
for k in ('ip4_addr', 'ip6_addr')
) and not iocage_lib.ioc_common.boolean_prop_exists(
create_props, ['dhcp', 'nat', 'ip_hostname']
):
iocage_lib.ioc_common.logit(
{
'level': 'EXCEPTION',
'message': 'Network connectivity is required to fetch a '
'plugin. Please enable dhcp/nat or supply'
' a valid ip address.'
},
_callback=self.callback,
silent=self.silent)
# These properties are not user configurable
for prop in (
f'type=pluginv{self.PLUGIN_VERSION}',
f'plugin_name={self.plugin}',
f'plugin_repository={self.git_repository}',
):
create_props.append(prop)
return create_props, pkg_repos
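    # For orientation (an inferred, non-authoritative example): the first
    # return value is a flat 'key=value' list consumed by IOCCreate, e.g.
    #
    #   ['release=13.1-RELEASE', 'dhcp=1', ...,
    #    'type=pluginv<N>', 'plugin_name=<plugin>', 'plugin_repository=<url>']
    #
    # and the second is the 'fingerprints' dict from the plugin manifest.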
def __fetch_plugin_create__(self, create_props):
"""Creates the plugin with the provided properties"""
iocage_lib.ioc_create.IOCCreate(
self.release,
create_props,
0,
silent=True,
basejail=True,
uuid=self.jail,
plugin=True,
thickconfig=self.thickconfig,
callback=self.callback
).create_jail()
jaildir = f"{self.iocroot}/jails/{self.jail}"
repo_dir = f"{jaildir}/root/usr/local/etc/pkg/repos"
path = f"{self.pool}/iocage/jails/{self.jail}"
_conf = iocage_lib.ioc_json.IOCJson(jaildir).json_get_value('all')
# We do these tests again as the user could supply a malformed IP to
# fetch that bypasses the more naive check in cli/fetch
auto_configs = _conf['dhcp'] or _conf['ip_hostname'] or _conf['nat']
if _conf["ip4_addr"] == "none" and _conf["ip6_addr"] == "none" and \
not auto_configs:
iocage_lib.ioc_common.logit(
{
"level": "ERROR",
"message": "\nAn IP address is needed to fetch a "
"plugin!\n"
},
_callback=self.callback,
silent=self.silent)
iocage_lib.ioc_destroy.IOCDestroy().destroy_jail(path)
iocage_lib.ioc_common.logit(
{
"level": "EXCEPTION",
"message": "Destroyed partial plugin."
},
_callback=self.callback)
return jaildir, _conf, repo_dir
def __fetch_plugin_install_packages__(self, jaildir, conf, pkg_repos,
create_props, repo_dir):
"""Attempts to start the jail and install the packages"""
kmods = conf.get("kmods", {})
        secure = "https://" in conf["packagesite"]
for kmod in kmods:
self.log.debug(f'Loading {kmod}')
try:
su.check_call(
["kldload", "-n", kmod], stdout=su.PIPE, stderr=su.PIPE)
except su.CalledProcessError:
iocage_lib.ioc_common.logit(
{
"level": "EXCEPTION",
"message": "Module not found!"
},
_callback=self.callback)
if secure:
# Certificate verification
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": "Secure packagesite detected, installing "
"ca_root_nss package."
},
_callback=self.callback,
silent=self.silent)
err = iocage_lib.ioc_create.IOCCreate(
self.release,
create_props,
0,
pkglist=["ca_root_nss"],
silent=True,
callback=self.callback
).create_install_packages(
self.jail, jaildir
)
if err:
iocage_lib.ioc_common.logit(
{
"level": "EXCEPTION",
"message":
"pkg error, please try non-secure packagesite."
},
_callback=self.callback)
freebsd_conf = """\
FreeBSD: { enabled: no }
"""
try:
os.makedirs(repo_dir, 0o755)
except OSError:
# It exists, that's fine.
pass
with open(f"{jaildir}/root/usr/local/etc/pkg/repos/FreeBSD.conf",
"w") as f_conf:
f_conf.write(freebsd_conf)
for repo in pkg_repos:
repo_name = repo
repo = pkg_repos[repo]
f_dir = f"{jaildir}/root/usr/local/etc/pkg/fingerprints/" \
f"{repo_name}/trusted"
r_dir = f"{jaildir}/root/usr/local/etc/pkg/fingerprints/" \
f"{repo_name}/revoked"
repo_conf = """\
{reponame}: {{
url: "{packagesite}",
signature_type: "fingerprints",
fingerprints: "/usr/local/etc/pkg/fingerprints/{reponame}",
enabled: true
}}
"""
try:
os.makedirs(f_dir, 0o755)
os.makedirs(r_dir, 0o755)
except OSError:
iocage_lib.ioc_common.logit(
{
"level": "ERROR",
"message":
f"Repo: {repo_name} already exists, skipping!"
},
_callback=self.callback,
silent=self.silent)
r_file = f"{repo_dir}/{repo_name}.conf"
with open(r_file, "w") as r_conf:
r_conf.write(
repo_conf.format(
reponame=repo_name, packagesite=conf["packagesite"]))
f_file = f"{f_dir}/{repo_name}"
for r in repo:
finger_conf = """\
function: {function}
fingerprint: {fingerprint}
"""
with open(f_file, "w") as f_conf:
f_conf.write(
finger_conf.format(
function=r["function"],
fingerprint=r["fingerprint"]))
err = iocage_lib.ioc_create.IOCCreate(
self.release,
create_props,
0,
pkglist=conf["pkgs"],
silent=True,
plugin=True,
callback=self.callback
).create_install_packages(
self.jail, jaildir, repo=conf["packagesite"]
)
if err:
iocage_lib.ioc_common.logit(
{
"level": "EXCEPTION",
"message": f"\npkg error:\n - {err}\n"
"\nRefusing to fetch artifact and run post_install.sh!"
},
_callback=self.callback)
def __fetch_plugin_post_install__(self, conf, _conf, jaildir):
"""Fetches the users artifact and runs the post install"""
status, jid = iocage_lib.ioc_list.IOCList().list_get_jid(self.jail)
if not status:
iocage_lib.ioc_start.IOCStart(self.jail, jaildir, silent=True)
ip4 = _conf['ip4_addr']
ip6 = _conf['ip6_addr']
ip = None
if ip6 != 'none':
ip = ','.join([
v.split('|')[-1].split('/')[0] for v in ip6.split(',')
])
if not ip and ip4 != 'none' and 'DHCP' not in ip4.upper():
ip = ','.join([
v.split('|')[-1].split('/')[0] for v in ip4.split(',')
])
if not ip:
if _conf['vnet']:
interface = _conf['interfaces'].split(',')[0].split(':')[0]
if interface == 'vnet0':
# Jails use epairNb by default inside
interface = f'{interface.replace("vnet", "epair")}b'
ip4_cmd = [
'jexec', f'ioc-{self.jail.replace(".", "_")}',
'ifconfig', interface, 'inet'
]
out = su.check_output(ip4_cmd).decode()
ip = f'{out.splitlines()[2].split()[1]}'
else:
ip = json.loads(
su.run([
'jls', '-j', f'ioc-{self.jail.replace(".", "_")}',
'--libxo', 'json'
], stdout=su.PIPE).stdout
)['jail-information']['jail'][0]['ipv4']
self.log.debug(f'IP for {self.plugin} - {self.jail}: {ip}.')
os.environ['IOCAGE_PLUGIN_IP'] = ip
plugin_env = {
**{
k: os.environ.get(k)
for k in ['http_proxy', 'https_proxy'] if os.environ.get(k)
},
'IOCAGE_PLUGIN_IP': ip
}
# We need to pipe from tar to the root of the jail.
if conf["artifact"]:
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": "\nFetching artifact... "
},
_callback=self.callback,
silent=self.silent)
self.__update_pull_plugin_artifact__(conf)
with open(
f"{jaildir}/{self.plugin}.json", "w"
) as f:
f.write(json.dumps(conf, indent=4, sort_keys=True))
try:
shutil.copy(f"{jaildir}/plugin/post_install.sh",
f"{jaildir}/root/root")
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": "\nRunning post_install.sh"
},
_callback=self.callback,
silent=self.silent)
command = ["/root/post_install.sh"]
try:
with iocage_lib.ioc_exec.IOCExec(
command, jaildir, uuid=self.jail, plugin=True,
skip=True, callback=self.callback,
su_env=plugin_env
) as _exec:
iocage_lib.ioc_common.consume_and_log(
_exec,
callback=self.callback
)
except iocage_lib.ioc_exceptions.CommandFailed as e:
message = b' '.join(e.message[-10:]).decode().rstrip()
iocage_lib.ioc_common.logit(
{
'level': 'EXCEPTION',
'message': f'Last 10 lines:\n{message}'
}, _callback=self.callback)
ui_json = f"{jaildir}/plugin/ui.json"
try:
with open(ui_json, "r") as u:
ui_data = json.load(u)
admin_portal = ui_data.get('adminportal', None)
doc_url = ui_data.get('docurl', None)
if admin_portal:
admin_portal = ','.join(
map(
lambda v: admin_portal.replace(
'%%IP%%', v
),
ip.split(',')
)
)
try:
ph = ui_data[
'adminportal_placeholders'
].items()
for placeholder, prop in ph:
admin_portal = admin_portal.replace(
placeholder,
iocage_lib.ioc_json.IOCJson(
jaildir).json_plugin_get_value(
prop.split('.'))
)
except KeyError:
pass
iocage_lib.ioc_common.logit(
{
'level': 'INFO',
'message': '\nAdmin Portal:\n'
f'{admin_portal}'
},
_callback=self.callback,
silent=self.silent)
if doc_url is not None:
iocage_lib.ioc_common.logit(
{
'level': 'INFO',
'message': f'\nDoc URL:\n{doc_url}'
},
_callback=self.callback,
silent=self.silent)
except FileNotFoundError:
                # They just didn't set an admin portal or doc url.
pass
except FileNotFoundError:
pass
def fetch_plugin_index(
self, props, _list=False, list_header=False, list_long=False,
accept_license=False, icon=False, official=False, index_only=False
):
self.pull_clone_git_repo()
with open(os.path.join(self.git_destination, 'INDEX'), 'r') as plugins:
plugins = json.load(plugins)
if index_only:
return plugins
plugins_ordered_dict = collections.OrderedDict(
sorted({
k: {'name': v['name'], 'description': v['description']}
for k, v in plugins.items()
if not (official and not v.get('official', False))
}.items())
)
if self.plugin is None and not _list:
for i, p in enumerate(plugins_ordered_dict.items()):
k, v = p
iocage_lib.ioc_common.logit(
{
'level': 'INFO',
'message':
f'[{i}] {v["name"]} - {v["description"]} ({k})'
},
_callback=self.callback,
silent=self.silent
)
if _list:
plugin_list = []
for k, v in plugins_ordered_dict.items():
plugin_dict = {
'name': v['name'],
'description': v['description'],
'plugin': k,
}
if not list_header:
plugin_dict.update({
'license': plugins[k].get('license', ''),
'official': plugins[k].get('official', False),
})
if icon:
plugin_dict['icon'] = plugins[k].get('icon', None)
plugin_list.append(plugin_dict)
if not list_header:
return plugin_list
else:
if list_long:
table = texttable.Texttable(max_width=0)
else:
table = texttable.Texttable(max_width=80)
list_header = ["NAME", "DESCRIPTION", "PKG"]
if icon:
list_header += ["ICON"]
plugin_list = [
[p['name'], p['description'], p['plugin']] + (
[p['icon']] if icon else []
)
for p in plugin_list
]
plugin_list.insert(0, list_header)
table.add_rows(plugin_list)
return table.draw()
if self.plugin is None:
self.plugin = input("\nType the number of the desired"
" plugin\nPress [Enter] or type EXIT to"
" quit: ")
self.plugin = self.__fetch_validate_plugin__(
self.plugin.lower(), plugins_ordered_dict
)
self.jail = f'{self.plugin}_{str(uuid.uuid4())[:4]}'
# We now run the fetch the user requested
self.fetch_plugin(props, 0, accept_license)
def __fetch_validate_plugin__(self, plugin, plugins):
"""
Checks if the user supplied an index number and returns the
        plugin. If they gave us a plugin name, we make sure it exists in
        the list at all.
"""
_plugin = plugin # Gets lost in the enumeration if no match is found.
if plugin.lower() == "exit":
exit()
if plugin.isdigit():
try:
plugin = list(plugins.items())[int(plugin)][0]
except IndexError:
iocage_lib.ioc_common.logit(
{
"level": "EXCEPTION",
"message": f"Plugin: {_plugin} not in list!"
},
_callback=self.callback)
except ValueError:
exit()
else:
if plugin not in plugins:
for k, v in plugins.items():
if plugin == v['name']:
plugin = k
break
else:
iocage_lib.ioc_common.logit(
{
'level': 'EXCEPTION',
'message': f'Plugin: {_plugin} not available.'
},
_callback=self.callback
)
return plugin
def __run_hook_script__(self, script_path):
# If the hook script has a service command, we want it to
# succeed. This is essentially a soft jail restart.
self.__stop_rc__()
path = f"{self.iocroot}/jails/{self.jail}"
jail_path = os.path.join(self.iocroot, 'jails', self.jail)
new_script_path = os.path.join(jail_path, 'root/tmp')
shutil.copy(script_path, new_script_path)
script_path = os.path.join(
new_script_path, script_path.split('/')[-1]
)
try:
with iocage_lib.ioc_exec.IOCExec(
['sh', os.path.join('/tmp', script_path.split('/')[-1])],
path,
uuid=self.jail,
plugin=True,
skip=True,
callback=self.callback
) as _exec:
iocage_lib.ioc_common.consume_and_log(
_exec,
callback=self.callback,
log=not self.silent
)
except iocage_lib.ioc_exceptions.CommandFailed as e:
iocage_lib.ioc_common.logit(
{
'level': 'EXCEPTION',
'message': b'\n'.join(e.message).decode()
},
_callback=self.callback,
silent=self.silent
)
else:
self.__stop_rc__()
self.__start_rc__()
def update(self, jid):
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": f"Snapshotting {self.jail}... "
},
_callback=self.callback,
silent=self.silent)
try:
self.__snapshot_jail__(name='update')
except iocage_lib.ioc_exceptions.Exists:
# User may have run update already (so clean) or they created this
# snapshot purposely, this is OK
pass
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": "Updating plugin INDEX... "
},
_callback=self.callback,
silent=self.silent)
self.pull_clone_git_repo()
plugin_conf = self.__load_plugin_json()
self.__check_manifest__(plugin_conf)
if plugin_conf['artifact']:
iocage_lib.ioc_common.logit(
{
'level': 'INFO',
'message': 'Updating plugin artifact... '
},
_callback=self.callback,
silent=self.silent
)
self.__update_pull_plugin_artifact__(plugin_conf)
pre_update_hook = os.path.join(
self.iocroot, 'jails', self.jail, 'plugin/pre_update.sh'
)
if os.path.exists(pre_update_hook):
iocage_lib.ioc_common.logit(
{
'level': 'INFO',
'message': 'Running pre_update.sh... '
},
_callback=self.callback,
silent=self.silent
)
self.__run_hook_script__(pre_update_hook)
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": "Removing old pkgs... "
},
_callback=self.callback,
silent=self.silent)
self.__update_pkg_remove__(jid)
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": "Installing new pkgs... "
},
_callback=self.callback,
silent=self.silent)
self.__update_pkg_install__(plugin_conf)
if plugin_conf["artifact"]:
# We need to do this again to ensure that if some files
# were removed when we removed pkgs and the overlay directory
# is supposed to bring them back, this does that
self.__update_pull_plugin_artifact__(plugin_conf)
post_update_hook = os.path.join(
self.iocroot, 'jails', self.jail, 'plugin/post_update.sh'
)
if os.path.exists(post_update_hook):
iocage_lib.ioc_common.logit(
{
'level': 'INFO',
'message': 'Running post_update.sh... '
},
_callback=self.callback,
silent=self.silent
)
self.__run_hook_script__(post_update_hook)
self.__remove_snapshot__(name="update")
def __update_pull_plugin_artifact__(self, plugin_conf):
"""Pull the latest artifact to be sure we're up to date"""
path = f"{self.iocroot}/jails/{self.jail}"
shutil.rmtree(f"{path}/plugin", ignore_errors=True)
uri = urllib.parse.urlparse(plugin_conf['artifact'])
if uri.scheme == 'file':
artifact_path = urllib.parse.unquote(uri.path)
if not os.path.exists(artifact_path):
iocage_lib.ioc_common.logit(
{
'level': 'EXCEPTION',
'message': f'{artifact_path} does not exist!'
},
_callback=self.callback,
silent=self.silent
)
distutils.dir_util.copy_tree(
artifact_path,
os.path.join(path, 'plugin')
)
else:
self._clone_repo(
self.branch, plugin_conf['artifact'],
f'{path}/plugin', callback=self.callback
)
try:
distutils.dir_util.copy_tree(
f"{path}/plugin/overlay/",
f"{path}/root",
preserve_symlinks=True)
except distutils.errors.DistutilsFileError:
# It just doesn't exist
pass
def __update_pkg_remove__(self, jid):
"""Remove all pkgs from the plugin"""
try:
with iocage_lib.ioc_exec.IOCExec(
command=['pkg', '-j', jid, 'delete', '-a', '-f', '-y'],
path=f'{self.iocroot}/jails/{self.jail}',
uuid=self.jail,
callback=self.callback,
unjailed=True
) as _exec:
iocage_lib.ioc_common.consume_and_log(
_exec,
callback=self.callback,
                    log=not self.silent
)
except iocage_lib.ioc_exceptions.CommandFailed as e:
self.__rollback_jail__(name="update")
final_msg = "PKG error, update failed! Rolling back snapshot.\n"
iocage_lib.ioc_common.logit(
{
"level": "ERROR",
"message": b'\n'.join(e.message).decode()
},
_callback=self.callback,
silent=self.silent)
iocage_lib.ioc_common.logit(
{
"level": "EXCEPTION",
"message": final_msg
},
_callback=self.callback)
def __update_pkg_install__(self, plugin_conf):
"""Installs all pkgs listed in the plugins configuration"""
path = f"{self.iocroot}/jails/{self.jail}"
try:
self.__fetch_plugin_install_packages__(
path, plugin_conf, plugin_conf['fingerprints'], [],
os.path.join(path, 'root/usr/local/etc/pkg/repos')
)
except (Exception, SystemExit):
iocage_lib.ioc_common.logit(
{
'level': 'ERROR',
'message': 'PKG error, update failed! '
'Rolling back snapshot.\n'
},
_callback=self.callback
)
self.__rollback_jail__(name='update')
raise
def upgrade(self, jid):
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": f"Snapshotting {self.jail}... "
},
_callback=self.callback,
silent=self.silent)
try:
self.__snapshot_jail__(name='upgrade')
except iocage_lib.ioc_exceptions.Exists:
# User may have run upgrade already (so clean) or they created this
# snapshot purposely, this is OK
pass
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": "Updating plugin INDEX... "
},
_callback=self.callback,
silent=self.silent)
self.pull_clone_git_repo()
plugin_conf = self.__load_plugin_json()
self.__check_manifest__(plugin_conf)
plugin_release = plugin_conf["release"]
iocage_lib.ioc_common.check_release_newer(
plugin_release, self.callback, self.silent)
# We want the new json to live with the jail
plugin_name = self.plugin.rsplit('_', 1)[0]
shutil.copy(
os.path.join(self.git_destination, f'{plugin_name}.json'),
os.path.join(
self.iocroot, 'jails', self.jail, f'{plugin_name}.json'
)
)
release_p = pathlib.Path(f"{self.iocroot}/releases/{plugin_release}")
if not release_p.exists():
iocage_lib.ioc_common.check_release_newer(
plugin_release, self.callback, self.silent)
iocage_lib.ioc_common.logit(
{
"level": "WARNING",
"message": "New plugin RELEASE missing, fetching now... "
},
_callback=self.callback,
silent=self.silent)
self.__fetch_release__(plugin_release)
path = f"{self.iocroot}/jails/{self.jail}/root"
iocage_lib.ioc_common.logit(
{
"level": "INFO",
"message": "Running upgrade... "
},
_callback=self.callback,
silent=self.silent)
new_release = iocage_lib.ioc_upgrade.IOCUpgrade(
plugin_release, path, silent=True).upgrade_basejail(
snapshot=False)
self.silent = True
self.update(jid)
return new_release
def __snapshot_jail__(self, name):
"""Snapshot the plugin"""
# Utilize the nicer API interface for this
import iocage_lib.iocage as ioc # Avoids dep issues
name = f"ioc_plugin_{name}_{self.date}"
ioc.IOCage(
jail=self.jail,
skip_jails=True,
silent=True
).snapshot(name)
def __rollback_jail__(self, name):
"""Rollback the plugins snapshot"""
# Utilize the nicer API interface for this
import iocage_lib.iocage as ioc # Avoids dep issues
name = f"ioc_plugin_{name}_{self.date}"
iocage = ioc.IOCage(
jail=self.jail,
skip_jails=True,
silent=True)
iocage.stop()
iocage.rollback(name)
def __load_plugin_json(self):
"""Load the plugins configuration"""
plugin_name = self.plugin.rsplit('_', 1)[0]
_json = os.path.join(self.git_destination, f'{plugin_name}.json')
try:
with open(_json, "r") as j:
_conf = json.load(j)
except FileNotFoundError:
_conf = self.__find_plugin_json(plugin_name)
except json.decoder.JSONDecodeError:
iocage_lib.ioc_common.logit(
{
"level": "EXCEPTION",
"message":
"Invalid JSON file supplied, please supply a "
"correctly formatted JSON file."
},
_callback=self.callback)
return _conf
def __find_plugin_json(self, plugin_name):
"""Matches the name of the local plugin's json with the INDEX's"""
_json = f'{self.iocroot}/jails/{self.plugin}/{plugin_name}.json'
try:
with open(_json, 'r') as j:
p_conf = json.load(j)
p_name = p_conf['name']
except FileNotFoundError:
iocage_lib.ioc_common.logit(
{
'level': 'EXCEPTION',
'message': f'{_json} was not found!'
},
_callback=self.callback)
except json.decoder.JSONDecodeError:
iocage_lib.ioc_common.logit(
{
'level': 'EXCEPTION',
'message':
'Invalid JSON file supplied, please supply a '
'correctly formatted JSON file.'
},
_callback=self.callback)
jsons = pathlib.Path(self.git_destination).glob('*.json')
for f in jsons:
_conf = json.loads(pathlib.Path(f).open('r').read())
if _conf['name'] == p_name:
return _conf
iocage_lib.ioc_common.logit(
{
'level': 'EXCEPTION',
'message': f'A plugin manifest matching {p_name} could not '
'be found!'
},
_callback=self.callback)
def __remove_snapshot__(self, name):
"""Removes all matching plugin snapshots"""
conf = iocage_lib.ioc_json.IOCJson(
f'{self.iocroot}/jails/{self.jail}'
).json_get_value('all')
release = conf['release']
names = [f'ioc_plugin_{name}_{self.date}', f'ioc_update_{release}']
dataset = self.zfs.get_dataset(
f'{self.pool}/iocage/jails/{self.jail}')
dataset_snaps = dataset.snapshots_recursive
for snap in dataset_snaps:
snap_name = snap.snapshot_name
if snap_name in names:
snap.delete()
def __stop_rc__(self):
iocage_lib.ioc_exec.SilentExec(
command=["/bin/sh", "/etc/rc.shutdown"],
path=f"{self.iocroot}/jails/{self.jail}",
uuid=self.jail,
callback=self.callback
)
def __start_rc__(self):
iocage_lib.ioc_exec.SilentExec(
command=["/bin/sh", "/etc/rc"],
path=f"{self.iocroot}/jails/{self.jail}",
uuid=self.jail,
callback=self.callback
)
def __check_manifest__(self, plugin_conf):
"""If the Major ABI changed, they cannot update anymore."""
jail_conf, write = iocage_lib.ioc_json.IOCJson(
location=f"{self.iocroot}/jails/{self.jail}").json_load()
jail_rel = int(jail_conf["release"].split(".", 1)[0])
manifest_rel = int(plugin_conf["release"].split(".", 1)[0])
manifest_major_minor = float(
plugin_conf["release"].rsplit("-", 1)[0].rsplit("-", 1)[0])
iocage_lib.ioc_common.check_release_newer(
manifest_major_minor, self.callback, self.silent)
if jail_rel < manifest_rel:
self.__remove_snapshot__(name="update")
iocage_lib.ioc_common.logit(
{
"level": "EXCEPTION",
"message": "Major ABI change detected, please run"
" 'upgrade' instead."
},
_callback=self.callback)
if write:
self.json_write(plugin_conf)
def __fetch_release__(self, release):
"""Will call fetch to get the new RELEASE the plugin will rely on"""
fetch_args = {'release': release, 'eol': False}
iocage_lib.iocage.IOCage(silent=self.silent).fetch(**fetch_args)
@staticmethod
def _verify_git_repo(repo_url, destination):
verified = False
with contextlib.suppress(
git.exc.InvalidGitRepositoryError,
git.exc.NoSuchPathError,
AttributeError,
):
repo = git.Repo(destination)
verified = any(u == repo_url for u in repo.remotes.origin.urls)
return verified
@staticmethod
def _clone_repo(ref, repo_url, destination, depth=None, callback=None):
"""
This is to replicate the functionality of cloning/pulling a repo
"""
branch = ref
try:
if os.path.exists(destination) and not IOCPlugin._verify_git_repo(
repo_url, destination
):
raise git.exc.InvalidGitRepositoryError()
# "Pull"
repo = git.Repo(destination)
origin = repo.remotes.origin
ref = 'master' if f'origin/{ref}' not in repo.refs else ref
for command in [
['checkout', ref],
['pull']
]:
iocage_lib.ioc_exec.SilentExec(
['git', '-C', destination] + command,
None, unjailed=True, decode=True,
su_env={
k: os.environ.get(k)
for k in ['http_proxy', 'https_proxy'] if
os.environ.get(k)
}
)
except (
iocage_lib.ioc_exceptions.CommandFailed,
git.exc.InvalidGitRepositoryError,
git.exc.NoSuchPathError
) as e:
basic_msg = 'Failed to update git repository:'
if isinstance(e, git.exc.NoSuchPathError):
f_msg = 'Cloning git repository'
elif isinstance(e, git.exc.InvalidGitRepositoryError):
f_msg = f'{basic_msg} Invalid Git Repository'
else:
f_msg = f'{basic_msg} ' \
f'{b" ".join(filter(bool, e.message)).decode()}'
iocage_lib.ioc_common.logit(
{
'level': 'ERROR',
'message': f_msg
}
)
# Clone
shutil.rmtree(destination, ignore_errors=True)
kwargs = {'env': os.environ.copy(), 'depth': depth}
repo = git.Repo.clone_from(
repo_url, destination, **{
k: v for k, v in kwargs.items() if v
}
)
origin = repo.remotes.origin
if not origin.exists():
iocage_lib.ioc_common.logit(
{
'level': 'EXCEPTION',
'message': f'Origin: {origin.url} does not exist!'
},
_callback=callback
)
if f'origin/{ref}' not in repo.refs:
ref = 'master'
msgs = [
f'\nBranch {branch} does not exist at {repo_url}!',
'Using "master" branch for plugin, this may not work '
'with your RELEASE'
]
for msg in msgs:
iocage_lib.ioc_common.logit(
{
'level': 'INFO',
'message': msg
},
_callback=callback
)
# Time to make this reality
repo.git.checkout(ref)
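    # Typical invocation (mirrors pull_clone_git_repo above; the branch and
    # destination path are illustrative only):
    #
    #   IOCPlugin._clone_repo(
    #       '13.1-RELEASE',
    #       'https://github.com/freenas/iocage-ix-plugins.git',
    #       '/iocage/.plugins/github_com_freenas_iocage-ix-plugins_git'
    #   )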
|
py | b4067236580a328818a1b2f869099bb8eeed0f23 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os, json, crypt
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
class TestHawqStandby(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = 'HAWQ/2.0.0/package'
STACK_VERSION = '2.3'
GPADMIN = 'gpadmin'
POSTGRES = 'postgres'
def __asserts_for_configure(self):
self.assertResourceCalled('Group', self.GPADMIN,
ignore_failures = True
)
self.assertResourceCalled('User', self.GPADMIN,
gid = self.GPADMIN,
groups = [self.GPADMIN, u'hadoop'],
ignore_failures = True,
password = crypt.crypt(self.getConfig()['configurations']['hawq-env']['hawq_password'], "$1$salt$")
)
self.assertResourceCalled('Group', self.POSTGRES,
ignore_failures = True
)
self.assertResourceCalled('User', self.POSTGRES,
gid = self.POSTGRES,
groups = [self.POSTGRES, u'hadoop'],
ignore_failures = True
)
self.assertResourceCalled('Execute', 'chown -R gpadmin:gpadmin /usr/local/hawq/',
timeout = 600
)
self.assertResourceCalled('XmlConfig', 'hdfs-client.xml',
conf_dir = '/usr/local/hawq/etc/',
configurations = self.getConfig()['configurations']['hdfs-client'],
configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-client'],
group = self.GPADMIN,
owner = self.GPADMIN,
mode = 0644
)
self.assertResourceCalled('XmlConfig', 'yarn-client.xml',
conf_dir = '/usr/local/hawq/etc/',
configurations = self.getConfig()['configurations']['yarn-client'],
configuration_attributes = self.getConfig()['configuration_attributes']['yarn-client'],
group = self.GPADMIN,
owner = self.GPADMIN,
mode = 0644
)
self.assertResourceCalled('XmlConfig', 'hawq-site.xml',
conf_dir = '/usr/local/hawq/etc/',
configurations = self.getConfig()['configurations']['hawq-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['hawq-site'],
group = self.GPADMIN,
owner = self.GPADMIN,
mode = 0644
)
self.assertResourceCalled('File', '/usr/local/hawq/etc/hawq_check.cnf',
content = self.getConfig()['configurations']['hawq-check-env']['content'],
owner = self.GPADMIN,
group = self.GPADMIN,
mode = 0644
)
self.assertResourceCalled('File', '/usr/local/hawq/etc/slaves',
content = InlineTemplate('c6401.ambari.apache.org\nc6402.ambari.apache.org\nc6403.ambari.apache.org\n\n'),
group = self.GPADMIN,
owner = self.GPADMIN,
mode = 0644
)
self.assertResourceCalled('Directory', '/data/hawq/master',
group = self.GPADMIN,
owner = self.GPADMIN,
create_parents = True
)
self.assertResourceCalled('Execute', 'chmod 700 /data/hawq/master',
user = 'root',
timeout = 600
)
self.assertResourceCalled('Directory', '/data/hawq/tmp/master',
group = self.GPADMIN,
owner = self.GPADMIN,
create_parents = True
)
@patch ('hawqstandby.common.__set_osparams')
def test_configure_default(self, set_osparams_mock):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + '/scripts/hawqstandby.py',
classname = 'HawqStandby',
command = 'configure',
config_file ='hawq_default.json',
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.__asserts_for_configure()
self.assertNoMoreResources()
@patch ('hawqstandby.common.__set_osparams')
def test_install_default(self, set_osparams_mock):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + '/scripts/hawqstandby.py',
classname = 'HawqStandby',
command = 'install',
config_file ='hawq_default.json',
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.__asserts_for_configure()
self.assertNoMoreResources()
@patch ('hawqstandby.common.__set_osparams')
def test_start_default(self, set_osparams_mock):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + '/scripts/hawqstandby.py',
classname = 'HawqStandby',
command = 'start',
config_file ='hawq_default.json',
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.__asserts_for_configure()
self.assertResourceCalled('Execute', 'source /usr/local/hawq/greenplum_path.sh && hawq init standby -a -v',
logoutput = True,
not_if = None,
only_if = None,
user = self.GPADMIN,
timeout = 900
)
self.assertNoMoreResources()
@patch ('hawqstandby.common.__set_osparams')
@patch ('common.get_local_hawq_site_property_value')
def test_stop_default(self, get_local_hawq_site_property_value_mock, set_osparams_mock):
get_local_hawq_site_property_value_mock.return_value = 5432
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + '/scripts/hawqstandby.py',
classname = 'HawqStandby',
command = 'stop',
config_file ='hawq_default.json',
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', 'source /usr/local/hawq/greenplum_path.sh && hawq stop standby -M fast -a -v',
logoutput = True,
not_if = None,
only_if = "netstat -tupln | egrep ':5432\\s' | egrep gpsyncmaster",
user = self.GPADMIN,
timeout = 900
)
self.assertNoMoreResources()
@patch ('common.get_local_hawq_site_property_value')
def test_activate_hawq_standby(self, get_local_hawq_site_property_value_mock):
"""Test Activate HAWQ Standby Command"""
get_local_hawq_site_property_value_mock.return_value = 5432
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + '/scripts/hawqstandby.py',
classname = 'HawqStandby',
command = 'activate_hawq_standby',
config_file ='hawq_default.json',
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('XmlConfig', 'hawq-site.xml',
conf_dir = '/usr/local/hawq/etc/',
configurations = self.getConfig()['configurations']['hawq-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['hawq-site'],
group = self.GPADMIN,
owner = self.GPADMIN,
mode = 0644
)
self.assertResourceCalled('Execute', 'source /usr/local/hawq/greenplum_path.sh && export PGHOST=\"c6402.ambari.apache.org\" && hawq activate standby -a -M fast -v --ignore-bad-hosts',
logoutput = True,
not_if = None,
only_if = None,
user = self.GPADMIN,
timeout = 900
)
self.assertResourceCalled('Execute', 'source /usr/local/hawq/greenplum_path.sh && hawq stop master -M fast -a -v',
logoutput = True,
not_if = None,
only_if = "netstat -tupln | egrep ':5432\\s' | egrep postgres",
user = self.GPADMIN,
timeout = 900
)
self.assertNoMoreResources() |
py | b406723d3fa7323bb5358011f3ddfe9b7806c8fe | # Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from ..utilities import Vec3
def plot_turbines(
ax, layout_x, layout_y, yaw_angles, D, color=None, wind_direction=270.0
):
"""
Plot wind plant layout from turbine locations.
Args:
ax (:py:class:`matplotlib.pyplot.axes`): Figure axes.
layout_x (np.array): Wind turbine locations (east-west).
layout_y (np.array): Wind turbine locations (north-south).
yaw_angles (np.array): Yaw angles of each wind turbine.
D (float): Wind turbine rotor diameter.
color (str): Pyplot color option to plot the turbines.
wind_direction (float): Wind direction (rotates farm)
"""
# Correct for the wind direction
yaw_angles = np.array(yaw_angles) - wind_direction - 270
if color is None:
color = "k"
for x, y, yaw in zip(layout_x, layout_y, yaw_angles):
R = D / 2.0
x_0 = x + np.sin(np.deg2rad(yaw)) * R
x_1 = x - np.sin(np.deg2rad(yaw)) * R
y_0 = y - np.cos(np.deg2rad(yaw)) * R
y_1 = y + np.cos(np.deg2rad(yaw)) * R
ax.plot([x_0, x_1], [y_0, y_1], color=color)
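# Minimal usage sketch (added for illustration, not from the original
# module): draw two turbines on an existing axes object. The layout, yaw
# angles and rotor diameter are made-up example values.
#
#   fig, ax = plt.subplots()
#   plot_turbines(
#       ax,
#       layout_x=[0.0, 630.0],
#       layout_y=[0.0, 0.0],
#       yaw_angles=[0.0, 20.0],
#       D=126.0,
#       wind_direction=270.0,
#   )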
def plot_turbines_with_fi(ax, fi, color=None):
"""
Wrapper function to plot turbines which extracts the data
from a FLORIS interface object
Args:
ax (:py:class:`matplotlib.pyplot.axes`): figure axes. Defaults
to None.
        fi (:py:class:`~.tools.floris_interface.FlorisInterface`):
            FlorisInterface object providing the farm layout and yaw angles.
color (str, optional): Color to plot turbines
"""
# Grab D
for i, turbine in enumerate(fi.floris.farm.turbines):
D = turbine.rotor_diameter
break
plot_turbines(
ax,
fi.layout_x,
fi.layout_y,
fi.get_yaw_angles(),
D,
color=color,
wind_direction=fi.floris.farm.wind_map.input_direction,
)
def line_contour_cut_plane(cut_plane, ax=None, levels=None, colors=None, **kwargs):
"""
Visualize a cut_plane as a line contour plot.
Args:
cut_plane (:py:class:`~.tools.cut_plane.CutPlane`):
CutPlane Object.
ax (:py:class:`matplotlib.pyplot.axes`): Figure axes. Defaults
to None.
levels (np.array, optional): Contour levels for plot.
Defaults to None.
colors (list, optional): Strings of color specification info.
Defaults to None.
**kwargs: Additional parameters to pass to `ax.contour`.
"""
if not ax:
fig, ax = plt.subplots()
# Reshape UMesh internally
x1_mesh = cut_plane.df.x1.values.reshape(
cut_plane.resolution[1], cut_plane.resolution[0]
)
x2_mesh = cut_plane.df.x2.values.reshape(
cut_plane.resolution[1], cut_plane.resolution[0]
)
u_mesh = cut_plane.df.u.values.reshape(
cut_plane.resolution[1], cut_plane.resolution[0]
)
Zm = np.ma.masked_where(np.isnan(u_mesh), u_mesh)
rcParams["contour.negative_linestyle"] = "solid"
# # Plot the cut-through
ax.contour(x1_mesh, x2_mesh, Zm, levels=levels, colors=colors, **kwargs)
# Make equal axis
ax.set_aspect("equal")
def visualize_cut_plane(
    cut_plane, ax=None, minSpeed=None, maxSpeed=None, cmap="coolwarm", levels=None, title=None
):
"""
Generate pseudocolor mesh plot of the cut_plane.
Args:
cut_plane (:py:class:`~.tools.cut_plane.CutPlane`): 2D
plane through wind plant.
ax (:py:class:`matplotlib.pyplot.axes`): Figure axes. Defaults
to None.
minSpeed (float, optional): Minimum value of wind speed for
contours. Defaults to None.
maxSpeed (float, optional): Maximum value of wind speed for
contours. Defaults to None.
cmap (str, optional): Colormap specifier. Defaults to
'coolwarm'.
        title (str, optional): Image title. Defaults to None.
Returns:
im (:py:class:`matplotlib.plt.pcolormesh`): Image handle.
"""
if not ax:
fig, ax = plt.subplots()
if minSpeed is None:
minSpeed = cut_plane.df.u.min()
if maxSpeed is None:
maxSpeed = cut_plane.df.u.max()
# Reshape to 2d for plotting
x1_mesh = cut_plane.df.x1.values.reshape(
cut_plane.resolution[1], cut_plane.resolution[0]
)
x2_mesh = cut_plane.df.x2.values.reshape(
cut_plane.resolution[1], cut_plane.resolution[0]
)
u_mesh = cut_plane.df.u.values.reshape(
cut_plane.resolution[1], cut_plane.resolution[0]
)
Zm = np.ma.masked_where(np.isnan(u_mesh), u_mesh)
# Plot the cut-through
im = ax.pcolormesh(
x1_mesh, x2_mesh, Zm, cmap=cmap, vmin=minSpeed, vmax=maxSpeed, shading="nearest"
)
# Add line contour
line_contour_cut_plane(
cut_plane, ax=ax, levels=levels, colors="w", linewidths=0.8, alpha=0.3
)
# Make equal axis
ax.set_aspect("equal")
# Make title
ax.set_title(title)
# Return im
return im
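# Hedged end-to-end sketch (assumes a v2-style FlorisInterface exposing
# calculate_wake() and get_hor_plane(); the exact import path and the input
# file name may differ by FLORIS version):
#
#   from floris.tools import FlorisInterface
#   fi = FlorisInterface("example_input.json")
#   fi.calculate_wake()
#   hor_plane = fi.get_hor_plane()
#   fig, ax = plt.subplots()
#   im = visualize_cut_plane(hor_plane, ax=ax, title="Horizontal cut plane")
#   fig.colorbar(im, ax=ax)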
def visualize_quiver(
cut_plane, ax=None, minSpeed=None, maxSpeed=None, downSamp=1, **kwargs
):
"""
Visualize the in-plane flows in a cut_plane using quiver.
Args:
cut_plane (:py:class:`~.tools.cut_plane.CutPlane`): 2D
plane through wind plant.
ax (:py:class:`matplotlib.pyplot.axes`): Figure axes. Defaults
to None.
minSpeed (float, optional): Minimum value of wind speed for
contours. Defaults to None.
maxSpeed (float, optional): Maximum value of wind speed for
contours. Defaults to None.
downSamp (int, optional): Down sample the number of quiver arrows
from underlying grid.
**kwargs: Additional parameters to pass to `ax.streamplot`.
Returns:
im (:py:class:`matplotlib.plt.pcolormesh`): Image handle.
"""
if not ax:
fig, ax = plt.subplots()
# Reshape UMesh internally
x1_mesh = cut_plane.df.x1.values.reshape(
cut_plane.resolution[1], cut_plane.resolution[0]
)
x2_mesh = cut_plane.df.x2.values.reshape(
cut_plane.resolution[1], cut_plane.resolution[0]
)
v_mesh = cut_plane.df.v.values.reshape(
cut_plane.resolution[1], cut_plane.resolution[0]
)
w_mesh = cut_plane.df.w.values.reshape(
cut_plane.resolution[1], cut_plane.resolution[0]
)
# plot the stream plot
ax.streamplot(
(x1_mesh[::downSamp, ::downSamp]),
(x2_mesh[::downSamp, ::downSamp]),
v_mesh[::downSamp, ::downSamp],
w_mesh[::downSamp, ::downSamp],
# scale=80.0,
# alpha=0.75,
# **kwargs
)
# ax.quiverkey(QV1, -.75, -0.4, 1, '1 m/s', coordinates='data')
# Make equal axis
# ax.set_aspect('equal')
def reverse_cut_plane_x_axis_in_plot(ax):
"""
Shortcut method to reverse direction of x-axis.
Args:
ax (:py:class:`matplotlib.pyplot.axes`): Figure axes.
"""
ax.invert_xaxis()
|
py | b40672e2a4b7ad4c05724e50066bf2df89324104 | import mimetypes
import urllib.parse
from xml.etree import ElementTree
import requests
class Error(Exception):
pass
class MediaRender:
    # FIXME: gmediarender glitches when a volume change is followed by
    # pause(0) and does not lower the volume right away; maybe it's a feature
MAX_FAILURES = 5
STATE_FIX = {
'PLAYING': 'play',
'STOPPED': 'stop',
'PAUSED_PLAYBACK': 'pause',
'NO_MEDIA_PRESENT': 'stop'
}
def __init__(self, data: dict):
self._data = data
self.broken = not (self._data and isinstance(data, dict))
self._failures = 0
self.log_cb = None
@property
def pretty_name(self):
return '{}[{}]'.format(self._data['name'], self._data['url']) if self._data else 'Wrong data'
def pause(self, mode=None):
if self.broken:
return
if mode is None:
mode = 1 if self.state() == 'play' else 0
if not mode:
self._send_request('AVTransport', 'Play', Speed=1)
else:
self._send_request('AVTransport', 'Pause')
@property
def volume(self):
if self.broken:
return -1
r = self._send_request('RenderingControl', 'GetVolume', Channel='Master')
return self._parse_response(r, 'CurrentVolume', -1)
@volume.setter
def volume(self, val):
if self.broken:
return
self._send_request('RenderingControl', 'SetVolume', DesiredVolume=val)
def state(self) -> str:
# STOPPED, PLAYING, TRANSITIONING, PAUSED_PLAYBACK, PAUSED_RECORDING, RECORDING, NO_MEDIA_PRESENT, CUSTOM;
state = 'stop'
if self.broken:
return state
r = self._send_request('AVTransport', 'GetTransportInfo')
state = self._parse_response(r, 'CurrentTransportState', state)
return self.STATE_FIX.get(state, state)
def currentsong(self) -> dict:
# TODO
if self.broken:
return {'title': 'broken', 'artist': 'broken'}
return {'title': 'notimplemented', 'artist': 'notimplemented'}
def play(self, uri):
if self.broken or not uri:
return
m_data = '<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/" xmlns:dc="' \
'http://purl.org/dc/elements/1.1/" xmlns:sec="http://www.sec.co.kr/" ' \
'xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/">' \
'<item id="0" parentID="-1" restricted="false"><' \
'res protocolInfo="http-get:*:{type}:*">{uri}</res></item></DIDL-Lite>' \
';'.format(uri=uri, type=self._get_content_type(uri))
self._send_request('AVTransport', 'Stop')
self._send_request('AVTransport', 'SetAVTransportURI', CurrentURI=uri, CurrentURIMetaData=m_data)
self._send_request('AVTransport', 'Play', Speed=1)
def _log(self, msg):
if self.log_cb:
self.log_cb(msg)
def _send_request(self, transport, cmd, **args) -> str:
# https://github.com/sergejey/majordomo/blob/4096837c1c65dee65d3b9419096b1aa612cce39f/modules/app_player/libs/MediaRenderer/MediaRenderer.php#L96
args['InstanceID'] = args.get('InstanceID', 0)
body = '<?xml version="1.0" encoding="utf-8" standalone="yes"?>\r\n' \
'<s:Envelope s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" ' \
'xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">' \
'<s:Body><u:{cmd} xmlns:u="{type}">{args}</u:{cmd}></s:Body>' \
'</s:Envelope>'.format(
cmd=cmd, type=self._data[transport]['type'],
args=''.join('<{0}>{1}</{0}>'.format(key, val) for key, val in args.items())
).encode()
headers = {
'Host': self._data['url'],
'Content-Type': 'text/xml; charset="utf-8"',
'Content-Length': str(len(body)),
'SOAPAction': '"{}#{}"'.format(self._data[transport]['type'], cmd)
}
try:
r = requests.post(self._data[transport]['url'], headers=headers, data=body, verify=False)
if not r.ok:
r.raise_for_status()
except Exception as e:
self._failures += 1
msg = 'Media Render error: {}'.format(e)
if self._failures > self.MAX_FAILURES:
raise Error(msg)
self._log(msg)
return ''
else:
self._failures = 0
return r.text
@staticmethod
def _get_content_type(uri):
# noinspection PyBroadException
try:
type_ = requests.head(uri).headers.get('Content-Type', 'application/octet-stream')
if type_ == 'application/octet-stream':
type_ = mimetypes.guess_type(urllib.parse.urlparse(uri).path)[0]
type_ = 'audio/mpeg' if not type_ or type_ == 'application/octet-stream' else type_
except Exception:
return 'audio/mpeg'
return type_
def _parse_response(self, r: str, tag: str, default):
if r:
try:
root = ElementTree.fromstring(r)[0][0]
except (IndexError, ElementTree.ParseError) as e:
self._log('Reply parsing error: {}'.format(e))
return default
el = root.find(tag)
if el is not None:
return el.text
return default
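# Added demo entry point (illustrative only; guarded so library imports are
# unaffected). The control URLs below are placeholders -- in practice they
# come from the renderer's UPnP device description.
if __name__ == '__main__':
    demo_data = {
        'name': 'Demo renderer',
        'url': '192.168.1.50:8080',
        'AVTransport': {
            'url': 'http://192.168.1.50:8080/AVTransport/control',
            'type': 'urn:schemas-upnp-org:service:AVTransport:1',
        },
        'RenderingControl': {
            'url': 'http://192.168.1.50:8080/RenderingControl/control',
            'type': 'urn:schemas-upnp-org:service:RenderingControl:1',
        },
    }
    render = MediaRender(demo_data)
    render.log_cb = print
    print(render.pretty_name, render.state())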
|
py | b4067337fa76fc33d65af30071f08769129945cc | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import socket
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import PyQt4
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import icons_rc
from electrum import keystore
from electrum.bitcoin import COIN, is_valid, TYPE_ADDRESS
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (block_explorer, block_explorer_info, format_time,
block_explorer_URL, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled)
from electrum import Transaction, mnemonic
from electrum import util, bitcoin, commands, coinchooser
from electrum import SimpleConfig, paymentrequest
from electrum.wallet import Wallet, Multisig_Wallet
from amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit
from network_dialog import NetworkDialog
from qrcodewidget import QRCodeWidget, QRDialog
from qrtextedit import ShowQRTextEdit
from transaction_dialog import show_transaction
from fee_slider import FeeSlider
from electrum import ELECTRUM_VERSION
import re
from util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt4 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_Return:
self.func()
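# Added usage note (commentary, not original Electrum code): status bar
# buttons are created with an icon, a tooltip and a zero-argument callback,
# e.g.
#
#   sb.addPermanentWidget(
#       StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"),
#                       self.settings_dialog)
#   )
#
# where `sb`, the icon path and the callback are illustrative stand-ins.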
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 5)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
tabs.addTab(self.create_history_tab(), _('History') )
tabs.addTab(self.send_tab, _('Send') )
tabs.addTab(self.receive_tab, _('Receive') )
if self.config.get('show_addresses_tab', False):
tabs.addTab(self.addresses_tab, _('Addresses'))
if self.config.get('show_utxo_tab', False):
tabs.addTab(self.utxo_tab, _('Coins'))
tabs.addTab(self.create_contacts_tab(), _('Contacts') )
tabs.addTab(self.create_console_tab(), _('Console') )
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.connect(self, QtCore.SIGNAL('payment_request_ok'), self.payment_request_ok)
self.connect(self, QtCore.SIGNAL('payment_request_error'), self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.connect(self, QtCore.SIGNAL('network'), self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.connect(self, SIGNAL('new_fx_quotes'), self.on_fx_quotes)
self.connect(self, SIGNAL('new_fx_history'), self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.emit(SIGNAL('new_fx_history'))
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
def on_quotes(self, b):
self.emit(SIGNAL('new_fx_quotes'))
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_addresses_tab(self):
show = not self.config.get('show_addresses_tab', False)
self.config.set_key('show_addresses_tab', show)
if show:
self.tabs.insertTab(3, self.addresses_tab, _('Addresses'))
else:
i = self.tabs.indexOf(self.addresses_tab)
self.tabs.removeTab(i)
def toggle_utxo_tab(self):
show = not self.config.get('show_utxo_tab', False)
self.config.set_key('show_utxo_tab', show)
if show:
self.tabs.insertTab(3, self.utxo_tab, _('Coins'))
else:
i = self.tabs.indexOf(self.utxo_tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.emit(QtCore.SIGNAL('network'), event, *args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, *args):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
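# Resolve the OpenAlias configured as 'alias' in a daemon thread and notify the GUI via the 'alias_received' signal when done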
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.emit(SIGNAL('alias_received'))
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# refresh the list views for the newly loaded wallet
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# The 'new_transaction' callback may have fired before the GUI was initialized, so check now whether there is anything to announce
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.mpk_menu.setEnabled(self.wallet.is_deterministic())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = 'Electrum %s - %s' % (self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
wallet_folder = self.get_wallet_folder()
filename = unicode(QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder))
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename = unicode( QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) )
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error), reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
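# Keep the five most recently opened wallets and rebuild the File menu entries with Ctrl+1..Ctrl+5 shortcuts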
recent = self.config.get('recently_open', [])
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
filename = line_dialog(self, _('New Wallet'), _('Enter file name')
+ ':', _('OK'), filename)
if not filename:
return
full_path = os.path.join(wallet_folder, filename)
if os.path.exists(full_path):
self.show_critical(_("File exists"))
return
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.mpk_menu = wallet_menu.addAction(_("&Master Public Keys"), self.show_master_public_keys)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction("Plot", self.plot_history_dialog)
hist_menu.addAction("Export", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
wallet_menu.addAction(_("Addresses"), self.toggle_addresses_tab).setShortcut(QKeySequence("Ctrl+A"))
wallet_menu.addAction(_("Coins"), self.toggle_utxo_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# 'Settings'/'Preferences' is a reserved menu item name on macOS; use a different label there as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.run_network_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" + _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system."))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
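# Show a tray notification for incoming transactions; three or more pending notifications are collapsed into a single summary message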
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the notifications if there are three or more new transactions
tx_amount = len(self.tx_notifications)
if(tx_amount >= 3):
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
total_amount += v
self.notify(_("%(txs)s new transactions received. Total amount received in the new transactions %(amount)s") \
% { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)})
self.tx_notifications = []
else:
for tx in list(self.tx_notifications):  # iterate over a copy; items are removed from the list below
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
self.notify(_("New transaction received. %(amount)s") % { 'amount' : self.format_amount_and_units(v)})
def notify(self, message):
if self.tray:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# Custom wrappers for getOpenFileName and getSaveFileName that remember the directory last used by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', unicode(os.path.expanduser('~')))
fileName = unicode( QFileDialog.getOpenFileName(self, title, directory, filter) )
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', unicode(os.path.expanduser('~')))
path = os.path.join( directory, filename )
fileName = unicode( QFileDialog.getSaveFileName(self, title, path, filter) )
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
self.connect(sender, QtCore.SIGNAL('timersignal'), self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount)
if text and x:
text += ' (%s)'%x
return text
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 5:
return 'mBTC'
if self.decimal_point == 8:
return 'BTC'
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
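# Keep a BTC amount edit and its fiat counterpart in sync: editing either one recomputes the other from the current exchange rate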
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(BLACK_FG)
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(BLUE_FG)
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(BLUE_FG)
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging (%d blocks)"%server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
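# Refresh the status bar, and refresh the tab lists once the wallet is up to date (or there is no connection)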
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr):
import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems(map(lambda x:x[0], expiration_values))
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The Bitcoin address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = req.get('sig').decode('hex')
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = unicode(self.receive_message_e.text())
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = map(lambda x: x[1], expiration_values)[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(addr)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(str(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
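# Reuse an unused receiving address for the new request; a deterministic wallet may create a fresh one after a warning, a non-deterministic wallet cannot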
addr = self.wallet.get_unused_address()
if addr is None:
from electrum.wallet import Imported_Wallet
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address()
if addr:
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
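# Rebuild the bitcoin: URI from the receive tab fields and refresh the QR widget (and the detached QR window, if visible)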
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = unicode(self.receive_message_e.text()).encode('utf8')
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
self.rbf_checkbox = QCheckBox(_('Replaceable'))
msg = [_('If you check this box, your transaction will be marked as non-final,'),
_('and you will have the possibility, while it is unconfirmed, to replace it with a transaction that pays a higher fee.'),
_('Note that some merchants do not accept non-final transactions until they are confirmed.')]
self.rbf_checkbox.setToolTip('<p>' + ' '.join(msg) + '</p>')
self.rbf_checkbox.setVisible(False)
grid.addWidget(self.fee_e_label, 5, 0)
grid.addWidget(self.fee_slider, 5, 1)
grid.addWidget(self.fee_e, 5, 2)
grid.addWidget(self.rbf_checkbox, 5, 3)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
self.amount_e.textEdited.connect(self.reset_max)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = RED_FG, RED_FG
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
elif self.fee_e.isModified():
amt_color, fee_color = BLACK_FG, BLACK_FG
elif self.amount_e.isModified():
amt_color, fee_color = BLACK_FG, BLUE_FG
else:
amt_color, fee_color = BLUE_FG, BLUE_FG
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color)
self.fee_e.setStyleSheet(fee_color)
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def reset_max(self):
self.is_max = False
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.config.get('offline') and self.config.is_dynfee() and not self.config.has_fee_estimates():
self.statusBar().showMessage(_('Waiting for fee estimates...'))
return False
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
except NotEnoughFunds:
self.not_enough_funds = True
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
if fee is None:
return
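# Decide whether to offer RBF: policy 0 always offers it, policy 2 never does, and policy 1 offers it only when reverse_dynfee maps the fee rate to -1 or 25 (presumably the low/static fee estimates)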
rbf_policy = self.config.get('rbf_policy', 1)
if rbf_policy == 0:
b = True
elif rbf_policy == 1:
fee_rate = fee * 1000 / tx.estimated_size()
try:
c = self.config.reverse_dynfee(fee_rate)
b = c in [-1, 25]
except:
b = False
elif rbf_policy == 2:
b = False
self.rbf_checkbox.setVisible(b)
self.rbf_checkbox.setChecked(b)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = coins
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def read_send_tab(self):
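# Collect outputs, fee, label and coins from the Send tab widgets; returns None (implicitly) if validation fails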
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = unicode( self.message_e.text() )
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.') % alias + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Bitcoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Bitcoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = (self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus()))
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins()
return outputs, fee, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
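# Build the transaction from the Send tab, optionally just preview it, then confirm, sign and broadcast it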
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.rbf_checkbox.isChecked()
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
confirm_amount = self.config.get('confirm_amount', COIN)
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
extra_fee = run_hook('get_additional_fee', self.wallet, tx)
if extra_fee:
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(extra_fee) )
confirm_rate = 2 * self.config.max_fee_rate()
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
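# Broadcast in a background thread; for BIP70 payment requests, also mark the invoice as paid and send an ACK with a refund address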
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
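# Freeze the Send tab fields while a payment request is being fetched and verified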
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.emit(SIGNAL('payment_request_ok'))
else:
self.emit(SIGNAL('payment_request_error'))
def pay_to_URI(self, URI):
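# Parse a bitcoin: URI; URIs carrying a payment request (r= or a signed name) are handled asynchronously via on_pr, plain URIs fill in the send form directly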
if not URI:
return
try:
out = util.parse_URI(unicode(URI), self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e]:
e.setText('')
e.setFrozen(False)
self.set_pay_from([])
self.rbf_checkbox.setChecked(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setMargin(0)
vbox.setSpacing(0)
vbox.addWidget(l)
buttons = QWidget()
vbox.addWidget(buttons)
return w
def create_addresses_tab(self):
from address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l)
def create_utxo_tab(self):
from utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
domain = self.wallet.get_addresses()
return self.wallet.get_spendable_coins(domain)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_valid(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove %s from your list of contacts?")
% " + ".join(labels)):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getOpenFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: apply( f, (method, args, self.password_dialog ))
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), self.run_network_dialog )
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self, self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(unicode(line2.text()), str(line1.text()))
def show_master_public_keys(self):
dialog = WindowModalDialog(self, "Master Public Keys")
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(100)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the selector if the wallet has more than one master public key
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(i+1)
return ''
labels = [ label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
def show_public_keys(self, address):
if not address: return
try:
pubkey_list = self.wallet.get_public_keys(address)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
d = WindowModalDialog(self, _("Public key"))
d.setMinimumSize(600, 200)
vbox = QVBoxLayout()
vbox.addWidget( QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Public key") + ':'))
keys_e = ShowQRTextEdit(text='\n'.join(pubkey_list))
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk_list = self.wallet.get_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 200)
vbox = QVBoxLayout()
vbox.addWidget( QLabel(_("Address") + ': ' + address))
vbox.addWidget( QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text='\n'.join(pk_list))
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
@protected
def do_sign(self, address, message, signature, password):
address = str(address.text()).strip()
message = unicode(message.toPlainText()).encode('utf-8').strip()
if not bitcoin.is_address(address):
self.show_message('Invalid Bitcoin address.')
return
if not bitcoin.is_p2pkh(address):
self.show_message('Cannot sign messages with this type of address.')
return
if not self.wallet.is_mine(address):
self.show_message('Address not in wallet.')
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = str(address.text()).strip()
message = unicode(message.toPlainText()).encode('utf-8').strip()
if not bitcoin.is_address(address):
self.show_message('Invalid Bitcoin address.')
return
if not bitcoin.is_p2pkh(address):
self.show_message('Cannot verify messages with this type of address.')
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(410, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
cyphertext = str(encrypted_e.toPlainText())
task = partial(self.wallet.decrypt_message, str(pubkey_e.text()),
cyphertext, password)
self.wallet.thread.add(task, on_success=message_e.setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = unicode(message_e.toPlainText())
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, str(pubkey_e.text()))
encrypted_e.setText(encrypted)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address = ''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
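# Parse raw transaction text; shows an error dialog and returns None if parsing fails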
from electrum.transaction import tx_from_str, Transaction
try:
tx = tx_from_str(txt)
return Transaction(tx)
except:
traceback.print_exc(file=sys.stdout)
self.show_critical(_("Electrum was unable to parse your transaction"))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if data.startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
# transactions are binary, but qrcode seems to return utf8...
data = data.decode('utf8')
z = bitcoin.base_decode(data, length=None, base=43)
data = ''.join(chr(ord(b)) for b in z).encode('hex')
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
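# Worker thread: derive each private key off the GUI thread; it emits
# 'computing_privkeys' after every address and 'show_privkeys' when finished,
# while the 'done' flag lets a closed dialog stop the loop early.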
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done:
break
private_keys[addr] = "\n".join(self.wallet.get_private_key(addr, password))
d.emit(SIGNAL('computing_privkeys'))
d.emit(SIGNAL('show_privkeys'))
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
d.connect(d, QtCore.SIGNAL('computing_privkeys'), lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
d.connect(d, QtCore.SIGNAL('show_privkeys'), show_privkeys)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
f = open(labelsFile, 'r')
data = f.read()
f.close()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to import your labels.") + "\n" + str(reason))
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels where exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
try:
from electrum.plot import plot_history
except ImportError as e:
self.show_error(str(e))
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.get_history()
lines = []
for item in history:
tx_hash, height, confirmations, timestamp, value, balance = item
if height>0:
if timestamp is not None:
time_string = format_time(timestamp)
else:
time_string = _("unverified")
else:
time_string = _("unconfirmed")
if value is not None:
value_string = format_satoshis(value, True)
else:
value_string = '--'
if tx_hash:
label = wallet.get_label(tx_hash)
label = label.encode('utf-8')
else:
label = ""
if is_csv:
lines.append([tx_hash, label, confirmations, value_string, time_string])
else:
lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent = 4))
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = QTextEdit()
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet(BLACK_FG if get_address() else RED_FG)
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
tx = self.wallet.sweep(get_pk(), self.network, self.config, get_address(), None)
if not tx:
self.show_message(_('No inputs found. (Note that inputs need to be confirmed)'))
return
self.warn_if_watching_only()
self.show_transaction(tx)
def _do_import(self, title, msg, func):
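# Generic import helper: prompts for whitespace-separated entries and applies
# func to each one, then reports which entries were added and which failed.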
text = text_dialog(self, title, msg + ' :', _('Import'))
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(languages.values())
try:
index = languages.keys().index(self.config.get("language",''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = languages.keys()[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_dynfee(x):
self.config.set_key('dynamic_fees', x == Qt.Checked)
self.fee_slider.update()
update_maxfee()
dynfee_cb = QCheckBox(_('Use dynamic fees'))
dynfee_cb.setChecked(self.config.is_dynfee())
dynfee_cb.setToolTip(_("Use fees recommended by the server."))
fee_widgets.append((dynfee_cb, None))
dynfee_cb.stateChanged.connect(on_dynfee)
def on_maxfee(x):
m = maxfee_e.get_amount()
if m: self.config.set_key('max_fee_rate', m)
self.fee_slider.update()
def update_maxfee():
d = self.config.is_dynfee()
maxfee_e.setDisabled(d)
maxfee_label.setDisabled(d)
maxfee_label = HelpLabel(_('Max static fee'), _('Max value of the static fee slider'))
maxfee_e = BTCkBEdit(self.get_decimal_point)
maxfee_e.setAmount(self.config.max_fee_rate())
maxfee_e.textChanged.connect(on_maxfee)
update_maxfee()
fee_widgets.append((maxfee_label, maxfee_e))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
rbf_policy = self.config.get('rbf_policy', 1)
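# rbf_policy index: 0 = Always, 1 = If the fee is low (default), 2 = Never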
rbf_label = HelpLabel(_('Propose Replace-By-Fee') + ':', '')
rbf_combo = QComboBox()
rbf_combo.addItems([_('Always'), _('If the fee is low'), _('Never')])
rbf_combo.setCurrentIndex(rbf_policy)
def on_rbf(x):
self.config.set_key('rbf_policy', x)
rbf_combo.currentIndexChanged.connect(on_rbf)
fee_widgets.append((rbf_label, rbf_combo))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet(GREEN_BG if validated else RED_BG)
else:
alias_e.setStyleSheet(RED_BG)
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.connect(self, SIGNAL('alias_received'), set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet(RED_BG if SSL_error else GREEN_BG if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['BTC', 'mBTC', 'bits']
msg = _('Base unit of your wallet.')\
+ '\n1BTC=1000mBTC.\n' \
+ _(' These settings affect the fields in the Send tab')+' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'BTC':
self.decimal_point = 8
elif unit_result == 'mBTC':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(on_unit)
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(block_explorer_info.keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", str(qr_combo.itemData(x).toString()), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
# Fiat Currency
hist_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
update_currencies()
update_history_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.disconnect(self, SIGNAL('alias_received'), set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def run_network_dialog(self):
if not self.network:
self.show_warning(_('You are using Electrum in offline mode; restart Electrum if you want to get connected'), title=_('Offline'))
return
NetworkDialog(self.wallet.network, self.config, self).do_exec()
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
cb.setEnabled(plugins.is_available(name, self.wallet))
cb.setChecked(p is not None and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(i+1,1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
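# initial suggestion: the configured fee rate (per kB) applied to the combined
# size of the parent and child transactions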
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
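# pre-fill the new-fee field at 1.5x the current fee as a starting point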
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx)
|
py | b406736c0fa251246269481e7e15ab12847b9aba | # -*- coding: utf-8 -*-
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
from config import config
from flask_bootstrap import Bootstrap
db = SQLAlchemy()
# bootstrap =
def create_app(config_name):
app = Flask(__name__)
print 'config_name:', config_name
app.config.from_object(config[config_name])
config[config_name].init_app(app)
# TODO liliangbin: routes and custom error pages
bootstrap = Bootstrap(app)
# Normally we initialize each extension with init_app(); that seems to have an issue here, but the constructor still appears to set things up automatically, so it should not be a problem.
db.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
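# Hedged usage sketch (the 'default' key is an assumption about the config
# dict defined in config.py):
# app = create_app('default')
# app.run()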
|
py | b4067400304a4a410569d8e48e5986a913e4cf4c | #!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test DDS driver
# Author: Even Rouault <even dot rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2019 Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import pytest
from osgeo import gdal
pytestmark = pytest.mark.require_driver('DDS')
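# Each entry pairs a DDS compression format with the expected per-band
# checksums (ETC1 has no alpha band, hence only three values).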
test_list = [
('DXT1', [11376, 57826, 34652, 32919]),
('DXT3', [12272, 59240, 34811, 7774]),
('DXT5', [12272, 59240, 34811, 10402]),
('ETC1', [9560, 57939, 30566]),
]
@pytest.mark.parametrize(
'compression,checksums',
test_list,
ids=[row[0] for row in test_list]
)
def test_dds(compression,checksums):
src_ds = gdal.Open('../gcore/data/stefan_full_rgba.tif')
ds = gdal.GetDriverByName('DDS').CreateCopy('/vsimem/out.dds', src_ds,
options=['FORMAT=' + compression])
assert ds
assert ds.RasterCount == len(checksums)
assert ds.GetMetadataItem('COMPRESSION', 'IMAGE_STRUCTURE') == compression
assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_RedBand
assert [ds.GetRasterBand(i+1).Checksum() for i in range(ds.RasterCount)] == checksums
def test_dds_no_compression():
ref_ds = gdal.Open('../gcore/data/stefan_full_rgba.tif')
ds = gdal.Open('data/dds/stefan_full_rgba_no_compression.dds')
assert ds
assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_RedBand
ref_checksum = [ref_ds.GetRasterBand(i+1).Checksum() for i in range(4)]
assert [ds.GetRasterBand(i+1).Checksum() for i in range(4)] == ref_checksum
|
py | b406742a43b5ad731c6726ba969ced6469437c82 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import sys, os
os.nice(15)
# In[ ]:
import integer_polyomino.assembly as ipa
import integer_polyomino.gpmap as gp
sys.path.append(os.path.join(os.getcwd(), "..", "src", "integer_polyomino", "scripts"))
import graph_topo
import plotly_utilities as pu
# In[ ]:
import plotly.graph_objs as go
from plotly.offline import init_notebook_mode, iplot
import plotly.figure_factory as ff
import plotly
import pandas as pd
import numpy as np
import seaborn as sns
# In[ ]:
# %matplotlib inline
init_notebook_mode(connected=True)
sns.set(style="dark", context="talk")
# In[ ]:
data_dir = os.path.join(os.getcwd(), "..", "data", "V" + ipa.__version__)
if not os.path.exists(data_dir):
raise ValueError("Specify an existing directory")
# In[ ]:
p = dict()
p["n_genes"] = 3
p["low_colour"] = 0
p["gen_colour"] = 6
p["high_colour"] = 8
p["threshold"] = 25
p["phenotype_builds"] = p["n_genes"] * 50
p["fixed_table"] = False
p["determinism"] = 1
p["n_jiggle"] = 1000
p["table_file"] = os.path.join(data_dir, "PhenotypeTable_D{determinism}.txt".format(**p))
set_metric_file = os.path.join(data_dir, "SetMetrics_N{n_genes}_C{gen_colour}_T{threshold}_B{phenotype_builds}_Cx{high_colour}_J{n_jiggle}_D{determinism}_S{low_colour}.txt".format(**p))
genome_metric_file = "GenomeMetrics_N{n_genes}_C{gen_colour}_T{threshold}_B{phenotype_builds}_Cx{high_colour}_J{n_jiggle}_D{determinism}_S{low_colour}".format(**p)
file_hdf = os.path.join(data_dir, 'Processed_GenomeMetrics.h5')
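# The processed metrics live in a single HDF5 store, keyed by the
# genome-metric file name built above.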
# In[ ]:
with pd.HDFStore(file_hdf, mode='r') as store:
dfg = store[genome_metric_file]
# In[ ]:
row = 21
# In[ ]:
iplot(pu.pretty_genome_pie(dfg, row))
# In[ ]:
iplot(pu.pretty_genome_hbar(dfg, row))
# In[ ]:
iplot(pu.metric_subplots(dfg, [row], p, 'group'))
# In[ ]:
iplot(pu.distribution_metrics_phenotype(pID='{(3, 0)}', file_name = genome_metric_file, hdf=file_hdf))
# In[ ]:
|
py | b406754399851a45e8137a85c1678ee0fc64ad57 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stub for pre-rename `tfx.dsl.components.base.base_executor`."""
from tfx.dsl.components.base import base_executor
from tfx.utils import deprecation_utils
BaseExecutor = deprecation_utils.deprecated_alias( # pylint: disable=invalid-name
deprecated_name='tfx.components.base.base_executor.BaseExecutor',
name='tfx.dsl.components.base.base_executor.BaseExecutor',
func_or_class=base_executor.BaseExecutor)
EmptyExecutor = deprecation_utils.deprecated_alias( # pylint: disable=invalid-name
deprecated_name='tfx.components.base.base_executor.EmptyExecutor',
name='tfx.dsl.components.base.base_executor.EmptyExecutor',
func_or_class=base_executor.EmptyExecutor)
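# Hedged usage note: importing through the old path, e.g.
# `from tfx.components.base import base_executor`, still resolves
# `base_executor.BaseExecutor` to the relocated class; deprecation_utils is
# expected to emit a deprecation warning pointing at the new name.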
|
py | b4067557bf20fa68958328446de09535b8bee537 | """Implementation of Suurballe's 1984 algorithm."""
|
py | b40675c89c8410cc4fc9a005f3b0670998736a56 | def colab_pdf(file_name, notebookpath="/content/drive/My Drive/Colab Notebooks/"):
import os
# Checking if file_name passed is a sring.
if not isinstance(file_name, str):
raise TypeError(
f"expected a string as file_name, but got {type(file_name)} instead."
)
# Using the defaults used by google.colab
drive_mount_point = "/content/drive/"
gdrive_home = os.path.join(drive_mount_point, "My Drive/")
# If the drive is not already mounted, attempt to mount it.
if not os.path.isdir(gdrive_home):
from google.colab import drive
drive.mount(drive_mount_point)
# Check if the notebook exists in the Drive.
if not os.path.isfile(os.path.join(notebookpath, file_name)):
raise ValueError(f"file '{file_name}' not found in path '{notebookpath}'.")
# Installing all the recommended packages.
get_ipython().system(
"apt update >> /dev/null && apt install texlive-xetex texlive-fonts-recommended texlive-generic-recommended >> /dev/null"
)
# If pdf with the same name exists, remove it.
pdf_file = os.path.join(gdrive_home, file_name.split(".")[0] + ".pdf")
if os.path.isfile(pdf_file):
os.remove(pdf_file)
# Attempt to convert to pdf and save it in Gdrive home dir using jupyter nbconvert command.
try:
get_ipython().system(
"jupyter nbconvert --output-dir='$gdrive_home' '$notebookpath''$file_name' --to pdf"
)
except:
return "nbconvert error"
# Attempt to download the file to system.
try:
from google.colab import files
file_name = file_name.split(".")[0] + ".pdf"
files.download(gdrive_home + file_name)
except:
return "File Download Unsuccessful. Saved in Google Drive"
return "File ready to be Downloaded and Saved to Drive"
|
py | b40676fa91c69fc3ce81500a37903d91151508cd | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 08:18:15 2017
@author: rachael
Compute the correlation integral over the COMs of peptides.
"""
from __future__ import absolute_import, division, print_function
from time import time
import clustering as cl
import gsd.hoomd
import os.path as op
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#compute corrdims for final frame for each run
import pdb
save_path=SSS
data_path=save_path
#Matlab setup
plt.ioff()
font = {'weight' : 'bold',
'size' : 22}
matplotlib.rc('font', **font)
runs = 5
ats = {'contact':17,'optical':12}
#molno = 4
molnolabel = 10000
AAdlabel = AAA
SCdlabel = SCSCSC
BBdlabel = BBBB
dt = 1.0
emax = 294 #maximum length scale to compute correlation integral on
estep = 0.147 #distance steps to compute correlation integral at
tstart = 10 #timestep where to begin
tmax = 999 #final timestep at which to compute correlation integral
tskip = 100 #compute correlation integral at every 100 timesteps
combeadtypes = ['EA','EB']
markers = ['o','x','^','v','s']
fbase = 'mols'+str(molnolabel)+'_' + str(AAdlabel)+'-02-'\
+str(SCdlabel)+'-150-'+str(BBdlabel)+'_small_run'
framets = range(tstart,tmax,tskip)
fnames = []
for i in range(runs):
fname = op.join(data_path,fbase + str(i+1) + '.gsd')
fnames.append(fname)
start = time()
cemats = np.zeros([int(emax/estep),1+runs])
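# column 0 holds the epsilon values; columns 1..runs hold C(epsilon) per run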
corrfig = plt.figure()
corrax = corrfig.add_subplot(111)
for t in framets:
for runi in range(runs):
#pdb.set_trace()
traj = gsd.hoomd.open(fnames[runi])
finalFrame = traj[t]
ind = []
for combeadtype in combeadtypes:
tind = finalFrame.particles.types.index(combeadtype)
ind += list(np.where(finalFrame.particles.typeid==tind)[0])
comlist = finalFrame.particles.position[ind]
cemat = cl.corrcalc(comlist,emax,estep)
corrax.plot(np.log(cemat[0,:]),np.log(cemat[1,:]),markers[runi])
cemats[:,0] = cemat[0,:]
cemats[:,runi+1] = cemat[1,:]
corrax.grid('on')
corrax.set_xlabel(r'$\log(\epsilon/\epsilon_0)$ $(d^*)$')
corrax.set_ylabel(r'$ \log(C(\epsilon))$')
corrfig.savefig(op.join(save_path,fbase+'-corrcalc'+str(t)),
bbox_inches='tight')
corrfi = open(op.join(save_path,fbase+'-corrcalc'+str(t)+'.dat'),'w')
for e in range(np.shape(cemats)[0]):
for runi in range(np.shape(cemats)[1]):
corrfi.write('{0} '.format(cemats[e,runi]))
corrfi.write('\n')
corrfi.close()
end = time()
print("Time to compute correlation integral: ",end-start)
|