the-stack_0_23838
"""Support for Telldus Live."""
import asyncio
from functools import partial
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_UPDATE_INTERVAL
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_call_later
from . import config_flow  # noqa  pylint: disable=unused-import
from .const import (
CONF_HOST, DOMAIN, KEY_HOST, KEY_SCAN_INTERVAL, KEY_SESSION,
MIN_UPDATE_INTERVAL, NOT_SO_PRIVATE_KEY, PUBLIC_KEY, SCAN_INTERVAL,
SIGNAL_UPDATE_ENTITY, TELLDUS_DISCOVERY_NEW)
APPLICATION_NAME = 'Home Assistant'
REQUIREMENTS = ['tellduslive==0.10.10']
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN:
vol.Schema({
vol.Optional(CONF_HOST, default=DOMAIN): cv.string,
vol.Optional(CONF_UPDATE_INTERVAL, default=SCAN_INTERVAL):
(vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL)))
}),
},
extra=vol.ALLOW_EXTRA,
)
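# Example configuration.yaml entry (illustrative only; the exact keys are
# defined by CONF_HOST / CONF_UPDATE_INTERVAL above):
#
# tellduslive:
#   host: tellduslive
#   update_interval: 60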
DATA_CONFIG_ENTRY_LOCK = 'tellduslive_config_entry_lock'
CONFIG_ENTRY_IS_SETUP = 'telldus_config_entry_is_setup'
NEW_CLIENT_TASK = 'telldus_new_client_task'
INTERVAL_TRACKER = '{}_INTERVAL'.format(DOMAIN)
async def async_setup_entry(hass, entry):
"""Create a tellduslive session."""
from tellduslive import Session
conf = entry.data[KEY_SESSION]
if KEY_HOST in conf:
# Session(**conf) does blocking IO when
# communicating with local devices.
session = await hass.async_add_executor_job(partial(Session, **conf))
else:
session = Session(
PUBLIC_KEY,
NOT_SO_PRIVATE_KEY,
application=APPLICATION_NAME,
**conf,
)
if not session.is_authorized:
_LOGGER.error('Authentication Error')
return False
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
hass.data[NEW_CLIENT_TASK] = hass.loop.create_task(
async_new_client(hass, session, entry))
return True
async def async_new_client(hass, session, entry):
"""Add the hubs associated with the current client to device_registry."""
interval = entry.data[KEY_SCAN_INTERVAL]
_LOGGER.debug('Update interval %s seconds.', interval)
client = TelldusLiveClient(hass, entry, session, interval)
hass.data[DOMAIN] = client
dev_reg = await hass.helpers.device_registry.async_get_registry()
for hub in await client.async_get_hubs():
_LOGGER.debug("Connected hub %s", hub['name'])
dev_reg.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, hub['id'])},
manufacturer='Telldus',
name=hub['name'],
model=hub['type'],
sw_version=hub['version'],
)
await client.update()
async def async_setup(hass, config):
"""Set up the Telldus Live component."""
if DOMAIN not in config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={'source': config_entries.SOURCE_IMPORT},
data={
KEY_HOST: config[DOMAIN].get(CONF_HOST),
KEY_SCAN_INTERVAL: config[DOMAIN].get(CONF_UPDATE_INTERVAL),
}))
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
if not hass.data[NEW_CLIENT_TASK].done():
hass.data[NEW_CLIENT_TASK].cancel()
interval_tracker = hass.data.pop(INTERVAL_TRACKER)
interval_tracker()
await asyncio.wait([
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in hass.data.pop(CONFIG_ENTRY_IS_SETUP)
])
del hass.data[DOMAIN]
del hass.data[DATA_CONFIG_ENTRY_LOCK]
return True
class TelldusLiveClient:
"""Get the latest data and update the states."""
def __init__(self, hass, config_entry, session, interval):
"""Initialize the Tellus data object."""
self._known_devices = set()
self._device_infos = {}
self._hass = hass
self._config_entry = config_entry
self._client = session
self._interval = interval
async def async_get_hubs(self):
"""Return hubs registered for the user."""
clients = await self._hass.async_add_executor_job(
self._client.get_clients)
return clients or []
def device_info(self, device_id):
"""Return device info."""
return self._device_infos.get(device_id)
@staticmethod
def identify_device(device):
"""Find out what type of HA component to create."""
if device.is_sensor:
return 'sensor'
from tellduslive import (DIM, UP, TURNON)
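        # `methods` is a bit mask of device capabilities; check the most
        # specific capability first so a dimmable device maps to `light`
        # rather than falling through to `switch`.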
if device.methods & DIM:
return 'light'
if device.methods & UP:
return 'cover'
if device.methods & TURNON:
return 'switch'
if device.methods == 0:
return 'binary_sensor'
_LOGGER.warning("Unidentified device type (methods: %d)",
device.methods)
return 'switch'
async def _discover(self, device_id):
"""Discover the component."""
device = self._client.device(device_id)
component = self.identify_device(device)
self._device_infos.update({
device_id:
await self._hass.async_add_executor_job(device.info)
})
async with self._hass.data[DATA_CONFIG_ENTRY_LOCK]:
if component not in self._hass.data[CONFIG_ENTRY_IS_SETUP]:
await self._hass.config_entries.async_forward_entry_setup(
self._config_entry, component)
self._hass.data[CONFIG_ENTRY_IS_SETUP].add(component)
device_ids = []
if device.is_sensor:
for item in device.items:
device_ids.append((device.device_id, item.name, item.scale))
else:
device_ids.append(device_id)
for _id in device_ids:
async_dispatcher_send(
self._hass, TELLDUS_DISCOVERY_NEW.format(component, DOMAIN),
_id)
async def update(self, *args):
"""Periodically poll the servers for current state."""
try:
if not await self._hass.async_add_executor_job(
self._client.update):
_LOGGER.warning('Failed request')
return
dev_ids = {dev.device_id for dev in self._client.devices}
new_devices = dev_ids - self._known_devices
            # just await each discover as `gather` uses up all the HTTPAdapter pools
for d_id in new_devices:
await self._discover(d_id)
self._known_devices |= new_devices
async_dispatcher_send(self._hass, SIGNAL_UPDATE_ENTITY)
finally:
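            # Always reschedule the next poll, even when the update failed.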
self._hass.data[INTERVAL_TRACKER] = async_call_later(
self._hass, self._interval, self.update)
def device(self, device_id):
"""Return device representation."""
return self._client.device(device_id)
def is_available(self, device_id):
"""Return device availability."""
return device_id in self._client.device_ids
the-stack_0_23840
"""
Copyright (C) 2020 Marcin Rybacki
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<[email protected]>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
"""
import unittest
import QuantLib as ql
BASIS_POINT = 1e-4
EPSILON = 1.e-10
OPTION_TYPE_MAP = {ql.Swap.Receiver: 'Receiver',
ql.Swap.Payer: 'Payer'}
SETTLEMENT_TYPE_MAP = {ql.Settlement.Physical: 'Physical',
ql.Settlement.Cash: 'Cash'}
SETTLEMENT_METHOD_MAP = {ql.Settlement.PhysicalOTC: 'Physical OTC',
ql.Settlement.CollateralizedCashPrice: (
'Collateralized Cash Price'),
ql.Settlement.ParYieldCurve: 'Par Yield Curve'}
def compounded_annual_constant_rate_discount(
rate,
day_counter):
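    # Discount factor under discrete annual compounding at a flat rate:
    # D(t) = (1 + rate) ** (-t), with t given by the supplied day counter.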
def _calc(start, end):
time = day_counter.yearFraction(start, end)
return (1.0 + rate) ** (-time)
return _calc
def par_yield_bps(underlying,
discount_handle):
fixed_leg = underlying.fixedLeg()
first_coupon = ql.as_fixed_rate_coupon(fixed_leg[0])
discount_date = first_coupon.accrualStartDate()
discount = discount_handle.discount(discount_date)
fixed_rate = underlying.fixedRate()
fixed_dct = underlying.fixedDayCount()
fair_rate = underlying.fairRate()
ir_func = compounded_annual_constant_rate_discount(fair_rate, fixed_dct)
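    # Replicate the cash-settled annuity: discount every remaining fixed
    # coupon on a flat curve at the fair (par) rate, normalise by the fixed
    # rate, then discount back to the accrual start date.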
bps = sum([ir_func(discount_date, c_f.date()) * c_f.amount() / fixed_rate
for c_f in fixed_leg
if c_f.date() > discount_date])
return abs(bps) * discount
def swap_pv01(underlying):
return abs(underlying.fixedLegBPS()) / BASIS_POINT
def make_const_black_vol_engine(discount_handle, volatility):
h = ql.QuoteHandle(ql.SimpleQuote(volatility))
return ql.BlackSwaptionEngine(discount_handle, h)
def make_const_bachelier_vol_engine(discount_handle, volatility):
h = ql.QuoteHandle(ql.SimpleQuote(volatility))
return ql.BachelierSwaptionEngine(discount_handle, h)
class SwaptionTest(unittest.TestCase):
def setUp(self):
self.calendar = ql.TARGET()
self.today = self.calendar.adjust(ql.Date.todaysDate())
ql.Settings.instance().evaluationDate = self.today
projection_curve_handle = ql.RelinkableYieldTermStructureHandle()
self.projection_rate = 0.01
self.projection_quote_handle = ql.RelinkableQuoteHandle()
projection_curve = ql.FlatForward(
self.today, self.projection_quote_handle, ql.Actual365Fixed())
projection_curve_handle.linkTo(projection_curve)
self.discount_handle = ql.YieldTermStructureHandle(ql.FlatForward(
self.today, ql.QuoteHandle(ql.SimpleQuote(0.0085)), ql.Actual365Fixed()))
self.swap_engine = ql.DiscountingSwapEngine(self.discount_handle)
self.idx = ql.Euribor6M(projection_curve_handle)
self.exercises = [ql.Period(1, ql.Years), ql.Period(2, ql.Years),
ql.Period(3, ql.Years), ql.Period(5, ql.Years),
ql.Period(7, ql.Years), ql.Period(10, ql.Years)]
self.lengths = [ql.Period(1, ql.Years), ql.Period(2, ql.Years),
ql.Period(3, ql.Years), ql.Period(5, ql.Years),
ql.Period(7, ql.Years), ql.Period(10, ql.Years),
ql.Period(15, ql.Years), ql.Period(20, ql.Years)]
self.swap_type = [ql.Swap.Receiver, ql.Swap.Payer]
def tearDown(self):
ql.Settings.instance().evaluationDate = ql.Date()
def _assert_swaption_annuity(self,
swaption_pricer_func,
use_bachelier_vol):
self.projection_quote_handle.linkTo(
ql.SimpleQuote(self.projection_rate))
settle_type = ql.Settlement.Cash
methods = [ql.Settlement.ParYieldCurve,
ql.Settlement.CollateralizedCashPrice]
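        # For cash-settled swaptions the expected annuity depends on the
        # settlement method: the swap PV01 for collateralized cash price,
        # the par-yield replication otherwise.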
for e in self.exercises:
for l in self.lengths:
for t in self.swap_type:
for m in methods:
volatility = 0.003 if use_bachelier_vol else 0.3
strike = 0.03
swaption_engine = swaption_pricer_func(
self.discount_handle, volatility)
exercise_date = self.calendar.advance(
self.today, e)
start_date = self.calendar.advance(
exercise_date, ql.Period(2, ql.Days))
underlying = ql.MakeVanillaSwap(
l, self.idx, strike, ql.Period(0, ql.Days),
effectiveDate=start_date,
fixedLegTenor=ql.Period(1, ql.Years),
fixedLegDayCount=ql.Thirty360(),
floatingLegSpread=0.0,
swapType=t)
underlying.setPricingEngine(self.swap_engine)
swaption = ql.Swaption(underlying,
ql.EuropeanExercise(
exercise_date),
settle_type,
m)
swaption.setPricingEngine(swaption_engine)
annuity = swaption.annuity()
expected_annuity = 0.0
if (m == ql.Settlement.CollateralizedCashPrice):
expected_annuity = swap_pv01(underlying)
if (m == ql.Settlement.ParYieldCurve):
expected_annuity = par_yield_bps(
underlying, self.discount_handle)
fail_msg = """ Swaption annuity test failed for:
option tenor: {option_tenor}
volatility : {volatility}
option type: {option_type}
swap tenor: {swap_tenor}
strike: {strike}
settlement: {settle_type}
method: {method}
annuity: {annuity}
replicated annuity: {expected_annuity}
""".format(option_tenor=e,
volatility=volatility,
option_type=OPTION_TYPE_MAP[t],
swap_tenor=l,
strike=strike,
settle_type=SETTLEMENT_TYPE_MAP[settle_type],
method=SETTLEMENT_METHOD_MAP[m],
annuity=annuity,
expected_annuity=expected_annuity)
                        self.assertAlmostEqual(
first=annuity,
second=expected_annuity,
delta=EPSILON,
msg=fail_msg)
def test_swaption_annuity_black_model(self):
"""Testing swaption annuity in Black model"""
self._assert_swaption_annuity(
swaption_pricer_func=make_const_black_vol_engine,
use_bachelier_vol=False)
def test_swaption_annuity_bachelier_model(self):
"""Testing swaption annuity in Bachelier model"""
self._assert_swaption_annuity(
swaption_pricer_func=make_const_bachelier_vol_engine,
use_bachelier_vol=True)
if __name__ == "__main__":
print("testing QuantLib " + ql.__version__)
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SwaptionTest, "test"))
unittest.TextTestRunner(verbosity=2).run(suite)
the-stack_0_23841
def corrupt_part_data_on_disk(node, table, part_name):
part_path = node.query("SELECT path FROM system.parts WHERE table = '{}' and name = '{}'"
.format(table, part_name)).strip()
corrupt_part_data_by_path(node, part_path)
def corrupt_part_data_by_path(node, part_path):
print("Corrupting part", part_path, "at", node.name)
print("Will corrupt: ",
node.exec_in_container(['bash', '-c', 'cd {p} && ls *.bin | head -n 1'.format(p=part_path)]))
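    # Append a byte to the first .bin file of the part so that its checksum
    # no longer matches and ClickHouse detects the part as broken.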
node.exec_in_container(['bash', '-c',
'cd {p} && ls *.bin | head -n 1 | xargs -I{{}} sh -c \'echo "1" >> $1\' -- {{}}'.format(
p=part_path)], privileged=True)
the-stack_0_23842
from nose.tools import assert_equal
import os
import csv
from collections import deque
from mock import MagicMock
import urllib.parse
import grequests
from download_logs.aws_lambda import AWSLambda
def get_file(path, filename):
return open(get_path(path, filename), 'r')
def get_path(path, filename):
return os.path.join(*path, filename)
def get_last_row(csv_filename):
with open(csv_filename, 'r') as f:
lastrow = None
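        # A deque with maxlen=1 streams the file and keeps only the final row.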
lastrow = deque(csv.reader(f, delimiter="\t"), 1)[0]
return lastrow
def test_base():
log_file = "2017-05-26T10-00-00.000-wZe41G6PdJYziQ8AAAAA.log"
aws_lambda = AWSLambda("MockBucket", log_file)
assert_equal(aws_lambda.bucket, "MockBucket")
assert_equal(aws_lambda.filename, log_file)
def test_process():
expected = [{
'timestamp': '1495792799',
'status': '206',
'file_downloaded': '/government/uploads/system/uploads/attachment_data/file/224634/Children_travelling_to_the_UK_leaflet_A5_WEB_final.pdf',
'ip': '11.111.111.111',
'referrer': '',
'user_agent': 'Mozilla/5.0 (Windows Phone 8.1; ARM; Trident/7.0; Touch; rv:11.0; IEMobile/11.0; Microsoft; Lumia 640 LTE) like Gecko',
'ga_client_id': 'GA1.1.1111111111.1111111111'
},
{
'timestamp': '1495792859',
'status': '200',
'file_downloaded': '/government/uploads/system/uploads/attachment_data/file/417696/Archived-information_sharing_guidance_for_practitioners_and_managers.pdf',
'ip': '11.111.111.111',
'referrer': 'https://www.bing.com/',
'user_agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586',
'ga_client_id': 'GA1.1.1111111111.1111111111'
}]
aws_lambda = AWSLambda("MockBucket", "2017-05-26T10-00-00.000-wZe41G6PdJYziQ8AAAAA.log")
test_log_file = get_file(['tests'], aws_lambda.filename)
AWSLambda.open_for_read = MagicMock(return_value=test_log_file)
processed_values = aws_lambda.process()
assert_equal(processed_values, expected)
def test_transform_row():
expected = {'timestamp': '1495792859',
'status': '200',
'file_downloaded': '/government/uploads/system/uploads/attachment_data/file/417696/Archived-information_sharing_guidance_for_practitioners_and_managers.pdf',
'ip': '11.111.111.111',
'referrer': 'https://www.bing.com/',
'user_agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586',
'ga_client_id': 'GA1.1.1111111111.1111111111'}
aws_lambda = AWSLambda("MockBucket", "2017-05-26T10-00-00.000-wZe41G6PdJYziQ8AAAAA.log")
path = get_path(['tests'], aws_lambda.filename)
row = get_last_row(path)
transformed_row = aws_lambda.transform_row(row)
assert_equal(transformed_row, expected)
def test_parse_client_id():
expected = "1111111111.1111111111"
aws_lambda = AWSLambda("MockBucket", "2017-05-26T10-00-00.000-wZe41G6PdJYziQ8AAAAA.log")
client_id = aws_lambda.parse_client_id('GA1.1.1111111111.1111111111')
assert_equal(client_id, expected)
def test_construct_url():
property_id = 'UA-26179049-7'
ga_client_id = '1111111111.1111111111'
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586'
file = '/government/uploads/system/uploads/attachment_data/file/417696/Archived-information_sharing_guidance_for_practitioners_and_managers.pdf'
referrer = 'https://www.bing.com/'
ip = '11.111.111.111'
download_data = {'timestamp': '1495792859',
'status': '200',
'file_downloaded': file,
'ip': ip,
'referrer': referrer,
'user_agent': user_agent,
'ga_client_id': ga_client_id}
aws_lambda = AWSLambda("MockBucket", "2017-05-26T10-00-00.000-wZe41G6PdJYziQ8AAAAA.log")
url = aws_lambda.construct_url(download_data)
latency = aws_lambda.calculate_time_delta(download_data['timestamp'])
params = urllib.parse.urlencode({
'v': 1,
'tid': property_id,
'cid': ga_client_id,
't': 'event',
'ec': 'Download from External Source',
'ea': file,
'el': referrer,
'ua': user_agent,
'uip': ip,
'dr': referrer,
'cd13': user_agent,
'cd14': ga_client_id,
'qt': latency
})
expected_url = "http://www.google-analytics.com/collect?{0}".format(params)
assert_equal(url, expected_url)
def test_send_events_to_GA():
processed_values = [{'timestamp': '1495792859',
'status': '200',
'file_downloaded': '/government/uploads/system/uploads/attachment_data/file/417696/Archived-information_sharing_guidance_for_practitioners_and_managers.pdf',
'ip': '11.111.111.111',
'referrer': 'https://www.bing.com/',
'user_agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586',
'ga_client_id': 'GA1.1.1111111111.1111111111'}]
aws_lambda = AWSLambda("MockBucket", "2017-05-26T10-00-00.000-wZe41G6PdJYziQ8AAAAA.log")
aws_lambda.process = MagicMock(return_value=processed_values)
aws_lambda.construct_url = MagicMock(return_value="http://www.google-analytics.com/collect?{0}")
grequests.map = MagicMock(return_value=["<Response [200]>"])
response = aws_lambda.send_events_to_GA()
assert_equal(response, ["<Response [200]>"])
the-stack_0_23843
# Copyright (c) 2022 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cinn
import os
import shutil
import numpy as np
import unittest
from cinn.auto_schedule.cost_model import CostModel
class TestCostModel(unittest.TestCase):
def test_cost_model_init(self):
with self.assertRaises(ValueError):
cost_model = CostModel(2)
cost_model = CostModel()
def test_basic_functions(self):
samples = [np.random.randn(5, 6) for i in range(16)]
labels = [1.0] * 16
cost_model = CostModel()
cost_model.train(samples, labels)
pred = cost_model.predict(samples)
path = "./test_cost_model.save_model"
cost_model.save(path)
load_cost_model = CostModel()
load_cost_model.load(path)
load_pred = load_cost_model.predict(samples)
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
np.testing.assert_almost_equal(pred, load_pred)
if __name__ == "__main__":
unittest.main()
the-stack_0_23844
#! python3
import pyinputplus as pyip
import random, time
numberOfQuestions = 10
correctAnswers = 0
for questionNumber in range(numberOfQuestions):
# Pick two random numbers:
num1 = random.randint(0, 9)
num2 = random.randint(0, 9)
prompt = '#%s: %s x %s = ' % (questionNumber + 1, num1, num2)
try:
# Right answers are handled by allowRegexes.
# Wrong answers are handled by blockRegexes, with a custom message.
pyip.inputStr(prompt, allowRegexes=['^%s$' % (num1 * num2)],
blockRegexes=[('.*', 'Incorrect!')],
timeout=8, limit=3)
except pyip.TimeoutException:
print('Out of time!')
except pyip.RetryLimitException:
print('Out of tries!')
else:
# This block runs if no exceptions were raised in the try block.
print('Correct!')
correctAnswers += 1
time.sleep(1) # Brief pause to let user see the result.
print('Score: %s / %s' % (correctAnswers, numberOfQuestions))
the-stack_0_23845
import unittest
from ctypes import *
formats = "bBhHiIlLqQfd"
formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
c_long, c_ulonglong, c_float, c_double, c_longdouble
class ArrayTestCase(unittest.TestCase):
def test_simple(self):
# create classes holding simple numeric types, and check
# various properties.
init = list(range(15, 25))
for fmt in formats:
alen = len(init)
int_array = ARRAY(fmt, alen)
ia = int_array(*init)
# length of instance ok?
self.assertEqual(len(ia), alen)
# slot values ok?
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, init)
# change the items
from operator import setitem
new_values = list(range(42, 42+alen))
[setitem(ia, n, new_values[n]) for n in range(alen)]
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, new_values)
# are the items initialized to 0?
ia = int_array()
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, [0] * len(init))
        # Too many initializers should be caught
self.assertRaises(IndexError, int_array, *range(alen*2))
CharArray = ARRAY(c_char, 3)
ca = CharArray(b"a", b"b", b"c")
# Should this work? It doesn't:
# CharArray("abc")
self.assertRaises(TypeError, CharArray, "abc")
self.assertEqual(ca[0], b"a")
self.assertEqual(ca[1], b"b")
self.assertEqual(ca[2], b"c")
self.assertEqual(ca[-3], b"a")
self.assertEqual(ca[-2], b"b")
self.assertEqual(ca[-1], b"c")
self.assertEqual(len(ca), 3)
# cannot delete items
from operator import delitem
self.assertRaises(TypeError, delitem, ca, 0)
def test_numeric_arrays(self):
alen = 5
numarray = ARRAY(c_int, alen)
na = numarray()
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0] * alen)
na = numarray(*[c_int()] * alen)
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0]*alen)
na = numarray(1, 2, 3, 4, 5)
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
na = numarray(*map(c_int, (1, 2, 3, 4, 5)))
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
def test_classcache(self):
self.assertTrue(not ARRAY(c_int, 3) is ARRAY(c_int, 4))
self.assertTrue(ARRAY(c_int, 3) is ARRAY(c_int, 3))
def test_from_address(self):
# Failed with 0.9.8, reported by JUrner
p = create_string_buffer(b"foo")
sz = (c_char * 3).from_address(addressof(p))
self.assertEqual(sz[:], b"foo")
self.assertEqual(sz[::], b"foo")
self.assertEqual(sz[::-1], b"oof")
self.assertEqual(sz[::3], b"f")
self.assertEqual(sz[1:4:2], b"o")
self.assertEqual(sz.value, b"foo")
try:
create_unicode_buffer
except NameError:
pass
else:
def test_from_addressW(self):
p = create_unicode_buffer("foo")
sz = (c_wchar * 3).from_address(addressof(p))
self.assertEqual(sz[:], "foo")
self.assertEqual(sz[::], "foo")
self.assertEqual(sz[::-1], "oof")
self.assertEqual(sz[::3], "f")
self.assertEqual(sz[1:4:2], "o")
self.assertEqual(sz.value, "foo")
def test_cache(self):
# Array types are cached internally in the _ctypes extension,
# in a WeakValueDictionary. Make sure the array type is
# removed from the cache when the itemtype goes away. This
# test will not fail, but will show a leak in the testsuite.
# Create a new type:
class my_int(c_int):
pass
# Create a new array type based on it:
t1 = my_int * 1
t2 = my_int * 1
self.assertTrue(t1 is t2)
if __name__ == '__main__':
unittest.main()
the-stack_0_23847
import os
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.template import Template, Context, TemplateSyntaxError
from django.test import TestCase
from django.test.utils import override_settings
@override_settings(
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(__file__), 'templates'),
),
SITE_ID=1,
)
class FlatpageTemplateTagTests(TestCase):
fixtures = ['sample_flatpages']
urls = 'django.contrib.flatpages.tests.urls'
def test_get_flatpages_tag(self):
"The flatpage template tag retrives unregistered prefixed flatpages by default"
out = Template(
"{% load flatpages %}"
"{% get_flatpages as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context())
self.assertEqual(out, "A Flatpage,A Nested Flatpage,")
def test_get_flatpages_tag_for_anon_user(self):
"The flatpage template tag retrives unregistered flatpages for an anonymous user"
out = Template(
"{% load flatpages %}"
"{% get_flatpages for anonuser as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'anonuser': AnonymousUser()
}))
self.assertEqual(out, "A Flatpage,A Nested Flatpage,")
@skipIfCustomUser
def test_get_flatpages_tag_for_user(self):
"The flatpage template tag retrives all flatpages for an authenticated user"
me = User.objects.create_user('testuser', '[email protected]', 's3krit')
out = Template(
"{% load flatpages %}"
"{% get_flatpages for me as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'me': me
}))
self.assertEqual(out, "A Flatpage,A Nested Flatpage,Sekrit Nested Flatpage,Sekrit Flatpage,")
def test_get_flatpages_with_prefix(self):
"The flatpage template tag retrives unregistered prefixed flatpages by default"
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context())
self.assertEqual(out, "A Nested Flatpage,")
def test_get_flatpages_with_prefix_for_anon_user(self):
"The flatpage template tag retrives unregistered prefixed flatpages for an anonymous user"
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' for anonuser as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'anonuser': AnonymousUser()
}))
self.assertEqual(out, "A Nested Flatpage,")
@skipIfCustomUser
def test_get_flatpages_with_prefix_for_user(self):
"The flatpage template tag retrive prefixed flatpages for an authenticated user"
me = User.objects.create_user('testuser', '[email protected]', 's3krit')
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' for me as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'me': me
}))
self.assertEqual(out, "A Nested Flatpage,Sekrit Nested Flatpage,")
def test_get_flatpages_with_variable_prefix(self):
"The prefix for the flatpage template tag can be a template variable"
out = Template(
"{% load flatpages %}"
"{% get_flatpages location_prefix as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'location_prefix': '/location/'
}))
self.assertEqual(out, "A Nested Flatpage,")
def test_parsing_errors(self):
"There are various ways that the flatpages template tag won't parse"
render = lambda t: Template(t).render(Context())
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages as %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages cheesecake flatpages %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages as flatpages asdf%}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages cheesecake user as flatpages %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages for user as flatpages asdf%}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages prefix for user as flatpages asdf%}")
the-stack_0_23851
import base64
import io
import json
import mimetypes
import os
import urllib.parse
import urllib.request
import warnings
from hashlib import blake2b
from typing import (
Iterable,
Union,
Dict,
Optional,
TypeVar,
Any,
Tuple,
List,
Type,
)
import numpy as np
from google.protobuf import json_format
from google.protobuf.field_mask_pb2 import FieldMask
from jina.types.struct import StructView
from .converters import png_to_buffer, to_datauri, guess_mime, to_image_blob
from ..arrays.chunk import ChunkArray
from ..arrays.match import MatchArray
from ..mixin import ProtoTypeMixin
from ..ndarray.generic import NdArray, BaseSparseNdArray
from ..score import NamedScore
from ...excepts import BadDocType
from ...helper import (
is_url,
typename,
random_identity,
download_mermaid_url,
dunder_get,
)
from ...importer import ImportExtensions
from ...logging import default_logger
from ...proto import jina_pb2
if False:
from scipy.sparse import coo_matrix
    # fix type-hint complaints from sphinx and flake8
import scipy
import tensorflow as tf
import torch
EmbeddingType = TypeVar(
'EmbeddingType',
np.ndarray,
scipy.sparse.csr_matrix,
scipy.sparse.coo_matrix,
scipy.sparse.bsr_matrix,
scipy.sparse.csc_matrix,
torch.sparse_coo_tensor,
tf.SparseTensor,
)
SparseEmbeddingType = TypeVar(
'SparseEmbeddingType',
np.ndarray,
scipy.sparse.csr_matrix,
scipy.sparse.coo_matrix,
scipy.sparse.bsr_matrix,
scipy.sparse.csc_matrix,
torch.sparse_coo_tensor,
tf.SparseTensor,
)
__all__ = ['Document', 'DocumentContentType', 'DocumentSourceType']
DIGEST_SIZE = 8
DocumentContentType = TypeVar('DocumentContentType', bytes, str, np.ndarray)
DocumentSourceType = TypeVar(
'DocumentSourceType', jina_pb2.DocumentProto, bytes, str, Dict
)
_all_mime_types = set(mimetypes.types_map.values())
_all_doc_content_keys = ('content', 'uri', 'blob', 'text', 'buffer')
class Document(ProtoTypeMixin):
"""
:class:`Document` is one of the **primitive data type** in Jina.
It offers a Pythonic interface to allow users access and manipulate
:class:`jina.jina_pb2.DocumentProto` object without working with Protobuf itself.
To create a :class:`Document` object, simply:
.. highlight:: python
.. code-block:: python
from jina import Document
d = Document()
d.text = 'abc'
Jina requires each Document to have a string id. You can set a custom one,
or if non has been set a random one will be assigned.
Or you can use :class:`Document` as a context manager:
.. highlight:: python
.. code-block:: python
with Document() as d:
d.text = 'hello'
assert d.id # now `id` has value
To access and modify the content of the document, you can use :attr:`text`, :attr:`blob`, and :attr:`buffer`.
Each property is implemented with proper setter, to improve the integrity and user experience. For example,
assigning ``doc.blob`` or ``doc.embedding`` can be simply done via:
.. highlight:: python
.. code-block:: python
import numpy as np
# to set as content
d.content = np.random.random([10, 5])
# to set as embedding
d.embedding = np.random.random([10, 5])
MIME type is auto set/guessed when setting :attr:`content` and :attr:`uri`
:class:`Document` also provides multiple way to build from existing Document. You can build :class:`Document`
from ``jina_pb2.DocumentProto``, ``bytes``, ``str``, and ``Dict``. You can also use it as view (i.e.
weak reference when building from an existing ``jina_pb2.DocumentProto``). For example,
.. highlight:: python
.. code-block:: python
a = DocumentProto()
b = Document(a, copy=False)
a.text = 'hello'
assert b.text == 'hello'
You can leverage the :meth:`convert_a_to_b` interface to convert between content forms.
"""
def __init__(
self,
document: Optional[DocumentSourceType] = None,
field_resolver: Dict[str, str] = None,
copy: bool = False,
**kwargs,
):
"""
:param document: the document to construct from. If ``bytes`` is given
then deserialize a :class:`DocumentProto`; ``dict`` is given then
parse a :class:`DocumentProto` from it; ``str`` is given, then consider
it as a JSON string and parse a :class:`DocumentProto` from it; finally,
one can also give `DocumentProto` directly, then depending on the ``copy``,
it builds a view or a copy from it.
:param copy: when ``document`` is given as a :class:`DocumentProto` object, build a
view (i.e. weak reference) from it or a deep copy from it.
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param kwargs: other parameters to be set _after_ the document is constructed
.. note::
When ``document`` is a JSON string or Python dictionary object, the constructor will only map the values
from known fields defined in Protobuf, all unknown fields are mapped to ``document.tags``. For example,
.. highlight:: python
.. code-block:: python
d = Document({'id': '123', 'hello': 'world', 'tags': {'good': 'bye'}})
assert d.id == '123' # true
assert d.tags['hello'] == 'world' # true
assert d.tags['good'] == 'bye' # true
"""
self._pb_body = jina_pb2.DocumentProto()
try:
if isinstance(document, jina_pb2.DocumentProto):
if copy:
self._pb_body.CopyFrom(document)
else:
self._pb_body = document
elif isinstance(document, (dict, str)):
if isinstance(document, str):
document = json.loads(document)
if field_resolver:
document = {
field_resolver.get(k, k): v for k, v in document.items()
}
user_fields = set(document.keys())
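                # Known protobuf fields are parsed directly; keys matching
                # settable Document properties go through their setters;
                # anything left over falls back into `tags`.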
support_fields = set(
self.attributes(
include_proto_fields_camelcase=True, include_properties=False
)
)
if support_fields.issuperset(user_fields):
json_format.ParseDict(document, self._pb_body)
else:
_intersect = support_fields.intersection(user_fields)
_remainder = user_fields.difference(_intersect)
if _intersect:
json_format.ParseDict(
{k: document[k] for k in _intersect}, self._pb_body
)
if _remainder:
support_prop = set(
self.attributes(
include_proto_fields=False, include_properties=True
)
)
_intersect2 = support_prop.intersection(_remainder)
_remainder2 = _remainder.difference(_intersect2)
if _intersect2:
self.set_attributes(**{p: document[p] for p in _intersect2})
if _remainder2:
                            self._pb_body.tags.update(
                                {k: document[k] for k in _remainder2}
                            )
elif isinstance(document, bytes):
# directly parsing from binary string gives large false-positive
# fortunately protobuf throws a warning when the parsing seems go wrong
# the context manager below converts this warning into exception and throw it
# properly
with warnings.catch_warnings():
warnings.filterwarnings(
'error', 'Unexpected end-group tag', category=RuntimeWarning
)
try:
self._pb_body.ParseFromString(document)
except RuntimeWarning as ex:
raise BadDocType(
f'fail to construct a document from {document}'
) from ex
elif isinstance(document, Document):
if copy:
self._pb_body.CopyFrom(document.proto)
else:
self._pb_body = document.proto
elif document is not None:
# note ``None`` is not considered as a bad type
raise ValueError(f'{typename(document)} is not recognizable')
except Exception as ex:
raise BadDocType(
f'fail to construct a document from {document}, '
f'if you are trying to set the content '
f'you may use "Document(content=your_content)"'
) from ex
if self._pb_body.id is None or not self._pb_body.id:
self.id = random_identity(use_uuid1=True)
# check if there are mutually exclusive content fields
if _contains_conflicting_content(**kwargs):
raise ValueError(
f'Document content fields are mutually exclusive, please provide only one of {_all_doc_content_keys}'
)
self.set_attributes(**kwargs)
self._mermaid_id = random_identity() #: for mermaid visualize id
def pop(self, *fields) -> None:
"""Remove the values from the given fields of this Document.
:param fields: field names
"""
for k in fields:
self._pb_body.ClearField(k)
def clear(self) -> None:
"""Remove all values from all fields of this Document."""
self._pb_body.Clear()
@property
def siblings(self) -> int:
"""
        The number of siblings of the :class:`Document`
.. # noqa: DAR201
:getter: number of siblings
:setter: number of siblings
:type: int
"""
return self._pb_body.siblings
@siblings.setter
def siblings(self, value: int):
self._pb_body.siblings = value
@property
def weight(self) -> float:
"""
:return: the weight of the document
"""
return self._pb_body.weight
@weight.setter
def weight(self, value: float):
"""
Set the weight of the document.
:param value: the float weight of the document.
"""
self._pb_body.weight = value
@property
def modality(self) -> str:
"""
:return: the modality of the document."""
return self._pb_body.modality
@modality.setter
def modality(self, value: str):
"""Set the modality of the document.
:param value: The modality of the document
"""
self._pb_body.modality = value
@property
def content_hash(self):
"""Get the content hash of the document.
:return: the content_hash from the proto
"""
return self._pb_body.content_hash
@property
def tags(self) -> Dict:
"""Return the `tags` field of this Document as a Python dict
:return: a Python dict view of the tags.
"""
return StructView(self._pb_body.tags)
@tags.setter
def tags(self, value: Dict):
"""Set the `tags` field of this Document to a Python dict
:param value: a Python dict
"""
self._pb_body.tags.Clear()
self._pb_body.tags.update(value)
def _update(
self,
source: 'Document',
destination: 'Document',
fields: Optional[List[str]] = None,
) -> None:
"""Merge fields specified in ``fields`` from source to destination.
:param source: source :class:`Document` object.
:param destination: the destination :class:`Document` object to be merged into.
        :param fields: a list of field names that are included from the destination document
.. note::
*. if ``fields`` is empty, then destination is overridden by the source completely.
*. ``destination`` will be modified in place, ``source`` will be unchanged.
*. the ``fields`` has value in destination while not in source will be preserved.
"""
# We do a safe update: only update existent (value being set) fields from source.
fields_can_be_updated = []
# ListFields returns a list of (FieldDescriptor, value) tuples for present fields.
present_fields = source._pb_body.ListFields()
for field_descriptor, _ in present_fields:
fields_can_be_updated.append(field_descriptor.name)
if not fields:
fields = fields_can_be_updated # if `fields` empty, update all fields.
for field in fields:
if (
field == 'tags'
): # For the tags, stay consistent with the python update method.
destination._pb_body.tags.update(source.tags)
else:
destination._pb_body.ClearField(field)
setattr(destination, field, getattr(source, field))
def update(
self,
source: 'Document',
fields: Optional[List[str]] = None,
) -> None:
"""Updates fields specified in ``fields`` from the source to current Document.
:param source: source :class:`Document` object.
:param fields: a list of field names that included from the current document,
if not specified, merge all fields.
.. note::
*. ``destination`` will be modified in place, ``source`` will be unchanged
"""
if fields and not isinstance(fields, list):
raise TypeError('Parameter `fields` must be list of str')
self._update(
source,
self,
fields=fields,
)
def update_content_hash(
self,
exclude_fields: Optional[Tuple[str]] = (
'id',
'chunks',
'matches',
'content_hash',
'parent_id',
),
include_fields: Optional[Tuple[str]] = None,
) -> None:
"""Update the document hash according to its content.
        :param exclude_fields: a tuple of field names that are excluded when computing the content hash
        :param include_fields: a tuple of field names that are included when computing the content hash
.. note::
"exclude_fields" and "include_fields" are mutually exclusive, use one only
"""
masked_d = jina_pb2.DocumentProto()
masked_d.CopyFrom(self._pb_body)
empty_doc = jina_pb2.DocumentProto()
if include_fields and exclude_fields:
raise ValueError(
'"exclude_fields" and "exclude_fields" are mutually exclusive, use one only'
)
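        # A FieldMask keeps only the listed paths: merging the masked doc into
        # an empty one retains just `include_fields`, while merging an empty
        # doc over the masked one (replace_repeated_field=True) clears the
        # `exclude_fields`.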
if include_fields is not None:
FieldMask(paths=include_fields).MergeMessage(masked_d, empty_doc)
masked_d = empty_doc
elif exclude_fields is not None:
FieldMask(paths=exclude_fields).MergeMessage(
empty_doc, masked_d, replace_repeated_field=True
)
self._pb_body.content_hash = blake2b(
masked_d.SerializeToString(), digest_size=DIGEST_SIZE
).hexdigest()
@property
def id(self) -> str:
"""The document id in hex string, for non-binary environment such as HTTP, CLI, HTML and also human-readable.
it will be used as the major view.
:return: the id from the proto
"""
return self._pb_body.id
@property
def parent_id(self) -> str:
"""The document's parent id in hex string, for non-binary environment such as HTTP, CLI, HTML and also human-readable.
it will be used as the major view.
:return: the parent id from the proto
"""
return self._pb_body.parent_id
@id.setter
def id(self, value: Union[bytes, str, int]):
"""Set document id to a string value.
:param value: id as bytes, int or str
"""
self._pb_body.id = str(value)
@parent_id.setter
def parent_id(self, value: Union[bytes, str, int]):
"""Set document's parent id to a string value.
:param value: id as bytes, int or str
"""
self._pb_body.parent_id = str(value)
@property
def blob(self) -> 'np.ndarray':
"""Return ``blob``, one of the content form of a Document.
.. note::
Use :attr:`content` to return the content of a Document
:return: the blob content from the proto
"""
return NdArray(self._pb_body.blob).value
@blob.setter
def blob(self, value: Union['np.ndarray', 'jina_pb2.NdArrayProto', 'NdArray']):
"""Set the `blob` to :param:`value`.
:param value: the array value to set the blob
"""
self._update_ndarray('blob', value)
@property
def embedding(self) -> 'EmbeddingType':
"""Return ``embedding`` of the content of a Document.
:return: the embedding from the proto
"""
return NdArray(self._pb_body.embedding).value
def get_sparse_embedding(
self, sparse_ndarray_cls_type: Type[BaseSparseNdArray], **kwargs
) -> 'SparseEmbeddingType':
"""Return ``embedding`` of the content of a Document as an sparse array.
:param sparse_ndarray_cls_type: Sparse class type, such as `SparseNdArray`.
:param kwargs: Additional key value argument, for `scipy` backend, we need to set
the keyword `sp_format` as one of the scipy supported sparse format, such as `coo`
or `csr`.
        :return: the embedding from the proto as a sparse array
"""
return NdArray(
self._pb_body.embedding,
sparse_cls=sparse_ndarray_cls_type,
is_sparse=True,
**kwargs,
).value
@embedding.setter
def embedding(self, value: Union['np.ndarray', 'jina_pb2.NdArrayProto', 'NdArray']):
"""Set the ``embedding`` of the content of a Document.
:param value: the array value to set the embedding
"""
self._update_ndarray('embedding', value)
def _update_sparse_ndarray(self, k, v, sparse_cls):
NdArray(
is_sparse=True,
sparse_cls=sparse_cls,
proto=getattr(self._pb_body, k),
).value = v
def _check_installed_array_packages(self):
from ... import JINA_GLOBAL
if JINA_GLOBAL.scipy_installed is None:
JINA_GLOBAL.scipy_installed = False
with ImportExtensions(required=False, pkg_name='scipy'):
import scipy
JINA_GLOBAL.scipy_installed = True
if JINA_GLOBAL.tensorflow_installed is None:
JINA_GLOBAL.tensorflow_installed = False
with ImportExtensions(required=False, pkg_name='tensorflow'):
import tensorflow
JINA_GLOBAL.tensorflow_installed = True
if JINA_GLOBAL.torch_installed is None:
JINA_GLOBAL.torch_installed = False
with ImportExtensions(required=False, pkg_name='torch'):
import torch
JINA_GLOBAL.torch_installed = True
def _update_if_sparse(self, k, v):
from ... import JINA_GLOBAL
v_valid_sparse_type = False
self._check_installed_array_packages()
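        # Probe each optional sparse backend (scipy / tensorflow / torch) and
        # delegate to the matching SparseNdArray wrapper if the value is that
        # backend's sparse type.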
if JINA_GLOBAL.scipy_installed:
import scipy
if scipy.sparse.issparse(v):
from ..ndarray.sparse.scipy import SparseNdArray
self._update_sparse_ndarray(k=k, v=v, sparse_cls=SparseNdArray)
v_valid_sparse_type = True
if JINA_GLOBAL.tensorflow_installed:
import tensorflow
if isinstance(v, tensorflow.SparseTensor):
from ..ndarray.sparse.tensorflow import SparseNdArray
self._update_sparse_ndarray(k=k, v=v, sparse_cls=SparseNdArray)
v_valid_sparse_type = True
if JINA_GLOBAL.torch_installed:
import torch
if isinstance(v, torch.Tensor) and v.is_sparse:
from ..ndarray.sparse.pytorch import SparseNdArray
self._update_sparse_ndarray(k=k, v=v, sparse_cls=SparseNdArray)
v_valid_sparse_type = True
return v_valid_sparse_type
def _update_ndarray(self, k, v):
if isinstance(v, jina_pb2.NdArrayProto):
getattr(self._pb_body, k).CopyFrom(v)
elif isinstance(v, np.ndarray):
NdArray(getattr(self._pb_body, k)).value = v
elif isinstance(v, NdArray):
NdArray(getattr(self._pb_body, k)).is_sparse = v.is_sparse
NdArray(getattr(self._pb_body, k)).value = v.value
else:
v_valid_sparse_type = self._update_if_sparse(k, v)
if not v_valid_sparse_type:
raise TypeError(f'{k} is in unsupported type {typename(v)}')
@property
def matches(self) -> 'MatchArray':
"""Get all matches of the current document.
:return: the array of matches attached to this document
"""
return MatchArray(self._pb_body.matches, reference_doc=self)
@matches.setter
def matches(self, value: Iterable['Document']):
"""Get all chunks of the current document.
:param value: value to set
"""
self.pop('matches')
self.matches.extend(value)
@property
def chunks(self) -> 'ChunkArray':
"""Get all chunks of the current document.
:return: the array of chunks of this document
"""
return ChunkArray(self._pb_body.chunks, reference_doc=self)
@chunks.setter
def chunks(self, value: Iterable['Document']):
"""Get all chunks of the current document.
:param value: the array of chunks of this document
"""
self.pop('chunks')
self.chunks.extend(value)
def set_attributes(self, **kwargs):
"""Bulk update Document fields with key-value specified in kwargs
.. seealso::
            :meth:`get_attributes` for bulk-getting attributes
:param kwargs: the keyword arguments to set the values, where the keys are the fields to set
"""
for k, v in kwargs.items():
if isinstance(v, (list, tuple)):
if k == 'chunks':
self.chunks.extend(v)
elif k == 'matches':
self.matches.extend(v)
else:
self._pb_body.ClearField(k)
getattr(self._pb_body, k).extend(v)
elif isinstance(v, dict):
self._pb_body.ClearField(k)
getattr(self._pb_body, k).update(v)
else:
if (
hasattr(Document, k)
and isinstance(getattr(Document, k), property)
and getattr(Document, k).fset
):
# if class property has a setter
setattr(self, k, v)
elif hasattr(self._pb_body, k):
# no property setter, but proto has this attribute so fallback to proto
setattr(self._pb_body, k, v)
else:
raise AttributeError(f'{k} is not recognized')
def get_attributes(self, *fields: str) -> Union[Any, List[Any]]:
"""Bulk fetch Document fields and return a list of the values of these fields
.. note::
Arguments will be extracted using `dunder_get`
.. highlight:: python
.. code-block:: python
d = Document({'id': '123', 'hello': 'world', 'tags': {'id': 'external_id', 'good': 'bye'}})
assert d.id == '123' # true
assert d.tags['hello'] == 'world' # true
assert d.tags['good'] == 'bye' # true
assert d.tags['id'] == 'external_id' # true
            res = d.get_attributes(*['id', 'tags__hello', 'tags__good', 'tags__id'])
assert res == ['123', 'world', 'bye', 'external_id']
:param fields: the variable length values to extract from the document
:return: a list with the attributes of this document ordered as the args
"""
ret = []
for k in fields:
try:
value = getattr(self, k)
if value is None:
raise ValueError
ret.append(value)
except (AttributeError, ValueError):
default_logger.warning(
f'Could not get attribute `{typename(self)}.{k}`, returning `None`'
)
ret.append(None)
# unboxing if args is single
if len(fields) == 1:
ret = ret[0]
return ret
@property
def buffer(self) -> bytes:
"""Return ``buffer``, one of the content form of a Document.
.. note::
Use :attr:`content` to return the content of a Document
:return: the buffer bytes from this document
"""
return self._pb_body.buffer
@buffer.setter
def buffer(self, value: bytes):
"""Set the ``buffer`` to :param:`value`.
:param value: the bytes value to set the buffer
"""
self._pb_body.buffer = value
if value and not self._pb_body.mime_type:
with ImportExtensions(
required=False,
pkg_name='python-magic',
                help_text=f'can not sniff the MIME type; '
f'MIME sniffing requires brew install '
f'libmagic (Mac)/ apt-get install libmagic1 (Linux)',
):
import magic
self._pb_body.mime_type = magic.from_buffer(value, mime=True)
@property
def text(self):
"""Return ``text``, one of the content form of a Document.
.. note::
Use :attr:`content` to return the content of a Document
:return: the text from this document content
"""
return self._pb_body.text
@text.setter
def text(self, value: str):
"""Set the `text` to :param:`value`
:param value: the text value to set as content
"""
self._pb_body.text = value
self.mime_type = 'text/plain'
@property
def uri(self) -> str:
"""Return the URI of the document.
:return: the uri from this document proto
"""
return self._pb_body.uri
@uri.setter
def uri(self, value: str):
"""Set the URI of the document.
.. note::
:attr:`mime_type` will be updated accordingly
:param value: acceptable URI/URL, raise ``ValueError`` when it is not a valid URI
"""
self._pb_body.uri = value
self.mime_type = guess_mime(value)
@property
def mime_type(self) -> str:
"""Get MIME type of the document
:return: the mime_type from this document proto
"""
return self._pb_body.mime_type
@mime_type.setter
def mime_type(self, value: str):
"""Set MIME type of the document
:param value: the acceptable MIME type, raise ``ValueError`` when MIME type is not
recognizable.
"""
if value in _all_mime_types:
self._pb_body.mime_type = value
elif value:
# given but not recognizable, do best guess
r = mimetypes.guess_type(f'*.{value}')[0]
if r:
self._pb_body.mime_type = r
else:
self._pb_body.mime_type = value
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.update_content_hash()
@property
def content_type(self) -> str:
"""Return the content type of the document, possible values: text, blob, buffer
:return: the type of content present in this document proto
"""
return self._pb_body.WhichOneof('content')
@property
def content(self) -> DocumentContentType:
"""Return the content of the document. It checks whichever field among :attr:`blob`, :attr:`text`,
:attr:`buffer` has value and return it.
.. seealso::
:attr:`blob`, :attr:`buffer`, :attr:`text`
:return: the value of the content depending on `:meth:`content_type`
"""
attr = self.content_type
if attr:
return getattr(self, attr)
@content.setter
def content(self, value: DocumentContentType):
"""Set the content of the document. It assigns the value to field with the right type.
.. seealso::
:attr:`blob`, :attr:`buffer`, :attr:`text`
:param value: the value from which to set the content of the Document
"""
if isinstance(value, bytes):
self.buffer = value
elif isinstance(value, str):
if _is_uri(value):
self.uri = value
else:
self.text = value
elif isinstance(value, np.ndarray):
self.blob = value
else:
# ``None`` is also considered as bad type
raise TypeError(f'{typename(value)} is not recognizable')
@property
def granularity(self):
"""Return the granularity of the document.
:return: the granularity from this document proto
"""
return self._pb_body.granularity
@granularity.setter
def granularity(self, value: int):
"""Set the granularity of the document.
:param value: the value of the granularity to be set
"""
self._pb_body.granularity = value
@property
def adjacency(self):
"""Return the adjacency of the document.
:return: the adjacency from this document proto
"""
return self._pb_body.adjacency
@adjacency.setter
def adjacency(self, value: int):
"""Set the adjacency of the document.
:param value: the value of the adjacency to be set
"""
self._pb_body.adjacency = value
@property
def score(self):
"""Return the score of the document.
:return: the score attached to this document as `:class:NamedScore`
"""
return NamedScore(self._pb_body.score)
@score.setter
def score(
self, value: Union[jina_pb2.NamedScoreProto, NamedScore, float, np.generic]
):
"""Set the score of the document.
You can assign a scala variable directly.
:param value: the value to set the score of the Document from
"""
if isinstance(value, jina_pb2.NamedScoreProto):
self._pb_body.score.CopyFrom(value)
elif isinstance(value, NamedScore):
self._pb_body.score.CopyFrom(value._pb_body)
elif isinstance(value, (float, int)):
self._pb_body.score.value = value
elif isinstance(value, np.generic):
self._pb_body.score.value = value.item()
else:
raise TypeError(f'score is in unsupported type {typename(value)}')
def convert_image_buffer_to_blob(self, color_axis: int = -1):
"""Convert an image buffer to blob
:param color_axis: the axis id of the color channel, ``-1`` indicates the color channel info at the last axis
"""
self.blob = to_image_blob(io.BytesIO(self.buffer), color_axis)
def convert_image_blob_to_uri(
self, width: int, height: int, resize_method: str = 'BILINEAR'
):
"""Assuming :attr:`blob` is a _valid_ image, set :attr:`uri` accordingly
:param width: the width of the blob
:param height: the height of the blob
:param resize_method: the resize method name
"""
png_bytes = png_to_buffer(self.blob, width, height, resize_method)
self.uri = 'data:image/png;base64,' + base64.b64encode(png_bytes).decode()
def convert_image_uri_to_blob(
self, color_axis: int = -1, uri_prefix: Optional[str] = None
):
"""Convert uri to blob
:param color_axis: the axis id of the color channel, ``-1`` indicates the color channel info at the last axis
:param uri_prefix: the prefix of the uri
"""
self.blob = to_image_blob(
(uri_prefix + self.uri) if uri_prefix else self.uri, color_axis
)
def convert_image_datauri_to_blob(self, color_axis: int = -1):
"""Convert data URI to image blob
:param color_axis: the axis id of the color channel, ``-1`` indicates the color channel info at the last axis
"""
req = urllib.request.Request(self.uri, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req) as fp:
buffer = fp.read()
self.blob = to_image_blob(io.BytesIO(buffer), color_axis)
def convert_buffer_to_blob(self, dtype=None, count=-1, offset=0):
"""Assuming the :attr:`buffer` is a _valid_ buffer of Numpy ndarray,
set :attr:`blob` accordingly.
:param dtype: Data-type of the returned array; default: float.
:param count: Number of items to read. ``-1`` means all data in the buffer.
:param offset: Start reading the buffer from this offset (in bytes); default: 0.
.. note::
One can only recover values not shape information from pure buffer.
"""
self.blob = np.frombuffer(self.buffer, dtype, count, offset)
def convert_blob_to_buffer(self):
"""Convert blob to buffer"""
self.buffer = self.blob.tobytes()
def convert_uri_to_buffer(self):
"""Convert uri to buffer
Internally it downloads from the URI and set :attr:`buffer`.
"""
if urllib.parse.urlparse(self.uri).scheme in {'http', 'https', 'data'}:
req = urllib.request.Request(
self.uri, headers={'User-Agent': 'Mozilla/5.0'}
)
with urllib.request.urlopen(req) as fp:
self.buffer = fp.read()
elif os.path.exists(self.uri):
with open(self.uri, 'rb') as fp:
self.buffer = fp.read()
else:
raise FileNotFoundError(f'{self.uri} is not a URL or a valid local path')
def convert_uri_to_datauri(self, charset: str = 'utf-8', base64: bool = False):
"""Convert uri to data uri.
Internally it reads uri into buffer and convert it to data uri
:param charset: charset may be any character set registered with IANA
:param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit. Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that frequently uses non-US-ASCII characters.
"""
if not _is_datauri(self.uri):
self.convert_uri_to_buffer()
self.uri = to_datauri(
self.mime_type, self.buffer, charset, base64, binary=True
)
def convert_buffer_to_uri(self, charset: str = 'utf-8', base64: bool = False):
"""Convert buffer to data uri.
Internally it first reads into buffer and then converts it to data URI.
:param charset: charset may be any character set registered with IANA
:param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit.
Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that
frequently uses non-US-ASCII characters.
"""
if not self.mime_type:
raise ValueError(
f'{self.mime_type} is unset, can not convert it to data uri'
)
self.uri = to_datauri(self.mime_type, self.buffer, charset, base64, binary=True)
def convert_text_to_uri(self, charset: str = 'utf-8', base64: bool = False):
"""Convert text to data uri.
:param charset: charset may be any character set registered with IANA
:param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit.
Designed to be efficient for non-text 8 bit and binary data.
Sometimes used for text data that frequently uses non-US-ASCII characters.
"""
self.uri = to_datauri(self.mime_type, self.text, charset, base64, binary=False)
def convert_uri_to_text(self):
"""Assuming URI is text, convert it to text"""
self.convert_uri_to_buffer()
self.text = self.buffer.decode()
def convert_content_to_uri(self):
"""Convert content in URI with best effort"""
if self.text:
self.convert_text_to_uri()
elif self.buffer:
self.convert_buffer_to_uri()
elif self.content_type:
raise NotImplementedError
def MergeFrom(self, doc: 'Document'):
"""Merge the content of target
:param doc: the document to merge from
"""
self._pb_body.MergeFrom(doc.proto)
def CopyFrom(self, doc: 'Document'):
"""Copy the content of target
:param doc: the document to copy from
"""
self._pb_body.CopyFrom(doc.proto)
def __mermaid_str__(self):
results = []
from google.protobuf.json_format import MessageToDict
content = MessageToDict(self._pb_body, preserving_proto_field_name=True)
_id = f'{self._mermaid_id[:3]}~Document~'
for idx, c in enumerate(self.chunks):
results.append(
f'{_id} --> "{idx + 1}/{len(self.chunks)}" {c._mermaid_id[:3]}~Document~: chunks'
)
results.append(c.__mermaid_str__())
for idx, c in enumerate(self.matches):
results.append(
f'{_id} ..> "{idx + 1}/{len(self.matches)}" {c._mermaid_id[:3]}~Document~: matches'
)
results.append(c.__mermaid_str__())
if 'chunks' in content:
content.pop('chunks')
if 'matches' in content:
content.pop('matches')
if content:
results.append(f'class {_id}{{')
for k, v in content.items():
if isinstance(v, (str, int, float, bytes)):
results.append(f'+{k} {str(v)[:10]}')
else:
results.append(f'+{k}({type(getattr(self, k, v))})')
results.append('}')
return '\n'.join(results)
def _mermaid_to_url(self, img_type: str) -> str:
"""
        Render the current document as a URL pointing to an SVG image; requires an internet connection
        :param img_type: the type of image to be generated
        :return: the URL pointing to the SVG
"""
if img_type == 'jpg':
img_type = 'img'
mermaid_str = (
"""
%%{init: {'theme': 'base', 'themeVariables': { 'primaryColor': '#FFC666'}}}%%
classDiagram
"""
+ self.__mermaid_str__()
)
encoded_str = base64.b64encode(bytes(mermaid_str.strip(), 'utf-8')).decode(
'utf-8'
)
return f'https://mermaid.ink/{img_type}/{encoded_str}'
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.plot(inline_display=True)
def plot(self, output: Optional[str] = None, inline_display: bool = False) -> None:
"""
Visualize the Document recursively.
:param output: a filename specifying the name of the image to be created,
the suffix svg/jpg determines the file type of the output image
:param inline_display: show image directly inside the Jupyter Notebook
"""
image_type = 'svg'
if output and output.endswith('jpg'):
image_type = 'jpg'
url = self._mermaid_to_url(image_type)
showed = False
if inline_display:
try:
from IPython.display import display, Image
display(Image(url=url))
showed = True
            except Exception:
                # no need to panic users
                pass
if output:
download_mermaid_url(url, output)
elif not showed:
from jina.logging import default_logger
default_logger.info(f'Document visualization: {url}')
@property
    def non_empty_fields(self) -> Tuple[str, ...]:
"""Return the set fields of the current document that are not empty
:return: the tuple of non-empty fields
"""
return tuple(field[0].name for field in self.ListFields())
@staticmethod
def attributes(
include_proto_fields: bool = True,
include_proto_fields_camelcase: bool = False,
include_properties: bool = False,
) -> List[str]:
"""Return all attributes supported by the Document, which can be accessed by ``doc.attribute``
:param include_proto_fields: if set, then include all protobuf fields
:param include_proto_fields_camelcase: if set, then include all protobuf fields in CamelCase
:param include_properties: if set, then include all properties defined for Document class
:return: a list of attributes in string.
"""
import inspect
support_keys = []
if include_proto_fields:
support_keys = list(jina_pb2.DocumentProto().DESCRIPTOR.fields_by_name)
if include_proto_fields_camelcase:
support_keys += list(
jina_pb2.DocumentProto().DESCRIPTOR.fields_by_camelcase_name
)
if include_properties:
support_keys += [
name
for (name, value) in inspect.getmembers(
Document, lambda x: isinstance(x, property)
)
]
return list(set(support_keys))
def __getattr__(self, item):
if hasattr(self._pb_body, item):
value = getattr(self._pb_body, item)
else:
value = dunder_get(self._pb_body, item)
return value
def _is_uri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return (
(scheme in {'http', 'https'} and is_url(value))
or (scheme in {'data'})
or os.path.exists(value)
or os.access(os.path.dirname(value), os.W_OK)
)
def _is_datauri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return is_url(value) and scheme in {'data'}
def _contains_conflicting_content(**kwargs):
content_keys = 0
for k in kwargs.keys():
if k in _all_doc_content_keys:
content_keys += 1
if content_keys > 1:
return True
return False
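# Hedged usage sketch: _all_doc_content_keys is assumed to be defined earlier
# in this module as the set of mutually exclusive content fields (e.g.
# {'text', 'buffer', 'blob', 'uri'}). Passing more than one should be rejected:
#
#   _contains_conflicting_content(text='hello', buffer=b'hello')  # -> True
#   _contains_conflicting_content(text='hello')                   # -> False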
|
the-stack_0_23852 | '''
Authors: Badri Adhikari, Jamie Lea, Bikash Shrestha, Jie Hou, and Matthew Bernardini
University of Missouri-St. Louis, 11-22-2020
File: Reconstruct 3D models with DISTFOLD using a predicted distance map and evaluate using TM-score
Options for building models: plain 2D numpy distance map, trRosetta .npz, CASP RR, .txt, or use distances from the PDB itself
'''
import argparse
import sys
import numpy as np
import re
import os
from math import sqrt
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.metrics import precision_score
from scipy.stats import pearsonr
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', type=str, required=False,
dest='native', help="true PDB file")
parser.add_argument('-f', type=str, required=False, dest='fasta',
help="FASTA file of the input distance map (for building models)")
parser.add_argument('-d', type=str, required=False, dest='dmap',
help="Predicted distance map as a 2D numpy array")
parser.add_argument('-t', type=int, required=False,
dest='threshold', default=12, help="Distance cutoff threshold")
parser.add_argument('-x', type=str, required=False,
dest='deepdist', help=".txt distance maps by DeepDist")
parser.add_argument('-c', type=str, required=False, dest='inputrr',
help="CASP RR file as input (all input rows are used)")
parser.add_argument('-r', type=str, required=False,
dest='trrosetta', help="trRosetta prediction")
parser.add_argument('-o', type=str, required=False,
dest='jobdir', help="Output directory")
parser.add_argument('-s', type=str, required=False, dest='ss',
help="3-class (H/C/E) Secondary structure file in FASTA format (for building models)")
parser.add_argument('-m', type=int, required=False, dest='minsep', default=2,
help="Minimum sequence separation (24 for long-range & 12 for medium+long-range)")
parser.add_argument('-p', required=False, dest='truedmap',
action='store_true', help='Use true distances from the PDB as input')
parser.add_argument('-b', required=False, dest='modeling3d',
action='store_true', help='Build 3D models using CNS')
args = parser.parse_args()
return args
def get_valid_amino_acids():
valid_amino_acids = {
'LLP': 'K', 'TPO': 'T', 'CSS': 'C', 'OCS': 'C', 'CSO': 'C', 'PCA': 'E', 'KCX': 'K',
'CME': 'C', 'MLY': 'K', 'SEP': 'S', 'CSX': 'C', 'CSD': 'C', 'MSE': 'M',
'ALA': 'A', 'ASN': 'N', 'CYS': 'C', 'GLN': 'Q', 'HIS': 'H', 'LEU': 'L',
'MET': 'M', 'MHO': 'M', 'PRO': 'P', 'THR': 'T', 'TYR': 'Y', 'ARG': 'R', 'ASP': 'D',
'GLU': 'E', 'GLY': 'G', 'ILE': 'I', 'LYS': 'K', 'PHE': 'F', 'SER': 'S',
'TRP': 'W', 'VAL': 'V', 'SEC': 'U'
}
return valid_amino_acids
def dmin_dmax_for_d(d):
    """Return the (dmin, dmax) restraint window around distance d.
    With dev = 0 the window collapses to the point distance itself;
    the commented-out alternative (0.125 * d) widens it proportionally."""
    dev = 0  # alternatively: 0.125 * d for a proportional window
    return (d - dev / 2.0, d + dev / 2.0)
def check_pdb_valid_row(l):
valid_amino_acids = get_valid_amino_acids()
if (get_pdb_rname(l) in valid_amino_acids.keys()) and (l.startswith('ATOM') or l.startswith('HETA')):
return True
return False
def get_pdb_atom_name(l):
return l[12: 16].strip()
def get_pdb_rnum(l):
return int(l[22: 27].strip())
def get_pdb_rname(l):
return l[17: 20].strip()
def get_pdb_xyz_cb(lines):
xyz = {}
for l in lines:
if get_pdb_atom_name(l) == 'CB':
xyz[get_pdb_rnum(l)] = (float(l[30:38].strip()), float(
l[38:46].strip()), float(l[46:54].strip()))
for l in lines:
if (get_pdb_rnum(l) not in xyz) and get_pdb_atom_name(l) == 'CA':
xyz[get_pdb_rnum(l)] = (float(l[30:38].strip()), float(
l[38:46].strip()), float(l[46:54].strip()))
return xyz
def get_pdb_xyz_ca(lines):
xyz = {}
for l in lines:
if get_pdb_atom_name(l) == 'CA':
xyz[get_pdb_rnum(l)] = (float(l[30:38].strip()), float(
l[38:46].strip()), float(l[46:54].strip()))
return xyz
def pdb2dmap(pdbfile):
valid_amino_acids = get_valid_amino_acids()
f = open(pdbfile, mode='r')
flines = f.read()
f.close()
lines = flines.splitlines()
templines = flines.splitlines()
for l in templines:
if not l.startswith('ATOM'): lines.remove(l)
# We have filtered out all non ATOMs at this point
rnum_rnames = {}
for l in lines:
atom = get_pdb_atom_name(l)
if atom != 'CA': continue
if not get_pdb_rname(l) in valid_amino_acids.keys():
            print(get_pdb_rname(l) + ' is unknown amino acid in ' + l)
return
rnum_rnames[int(get_pdb_rnum(l))] = valid_amino_acids[get_pdb_rname(l)]
seq = ""
for i in range(max(rnum_rnames.keys())):
if i+1 not in rnum_rnames:
# print (rnum_rnames)
# print ('Warning! residue not defined for rnum = ' + str(i+1))
seq += '-'
else:
seq += rnum_rnames[i+1]
L = len(seq)
xyz_cb = get_pdb_xyz_cb(lines)
total_valid_residues = len(xyz_cb)
if len(xyz_cb) != L:
print(rnum_rnames)
for i in range(L):
if i+1 not in xyz_cb: print('XYZ not defined for ' + str(i+1))
print('Warning! Something went wrong - len of cbxyz != seqlen!! ' + \
str(len(xyz_cb)) + ' ' + str(L))
cb_map = np.full((L, L), np.nan)
for r1 in sorted(xyz_cb):
(a, b, c) = xyz_cb[r1]
for r2 in sorted(xyz_cb):
(p, q, r) = xyz_cb[r2]
cb_map[r1 - 1, r2 - 1] = sqrt((a-p)**2+(b-q)**2+(c-r)**2)
return (total_valid_residues, cb_map, rnum_rnames)
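# Hedged usage sketch (the file name is hypothetical): derive both the true
# Cb distance map and an 8 Angstrom contact map from a native structure.
#
#   (n_valid, true_dmap, rnum_rnames) = pdb2dmap('native.pdb')
#   contact_map = (true_dmap < 8.0).astype(float)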
def seqfasta(fasta):
f = open(fasta)
lines = f.readlines()
f.close()
seq = ''
for l in lines:
if l.startswith('>'): continue
seq += l.strip()
return seq
def trrosetta_probindex2dist(index):
    # bins 1..36 map to 2.25, 2.75, ..., 19.75 Angstroms;
    # any other index falls through to the last bin distance (19.75)
    d = 1.75
    for k in range(1, 37):
        d += 0.5
        if index == k:
            return d
    return d
def trrosetta2maps(trrosetta):
x = np.load(trrosetta)
a = x['dist']
if len(a[0, 0, :]) != 37:
print('ERROR! This does not look like a trRosetta prediction')
return
D = np.full((len(a), len(a)), 21.0)
for i in range(len(a)):
for j in range(len(a)):
maxprob_value = 0.0
for k in range(37):
if maxprob_value < a[i, j, k]:
maxprob_value = a[i, j, k]
D[i, j] = trrosetta_probindex2dist(k)
C = np.full((len(a), len(a)), 0.0)
for i in range(len(a)):
for j in range(i, len(a)):
for k in range(1, 13):
C[i, j] += a[i, j, k]
return (D, C)
def calc_dist_errors(P, Y, L, dist_thres=None, min_sep=None, top_l_by_x=None, pred_limit=None):
# The pred_limit needs to be 20 and not a very high value so a comparison with trRosetta is fair.
# The maximum predicted distance for a trRosetta is 20.5 but if other methods predict a higher distance
# they will be severely penalized, hence this cutoff.
if Y is None:
print('ERROR! Y is None!')
return
if P is None:
print('ERROR! P is None!')
return
if np.isnan(Y).all():
print('ERROR! Y is all NaNs!')
return
if np.isnan(P).all():
print('ERROR! P is all NaNs!')
return
errors = {}
errors['mae'] = np.nan
errors['mse'] = np.nan
errors['rmse'] = np.nan
errors['pearsonr'] = np.nan
errors['count-pred'] = np.nan
errors['count-true'] = np.nan
pred_dict = {}
true_dict = {}
for p in range(len(Y)):
for q in range(len(Y)):
if q - p < min_sep: continue
if np.isnan(P[p, q]): continue
if np.isnan(Y[p, q]): continue
if Y[p, q] >= dist_thres: continue
if np.isnan(Y[p, q]): continue
true_dict[(p, q)] = Y[p, q]
if P[p, q] >= pred_limit: continue
pred_dict[(p, q)] = P[p, q]
xl = round(L / top_l_by_x)
pred_list = []
true_list = []
for pair in sorted(pred_dict.items(), key=lambda x: x[1]):
if pair[0] not in true_dict: continue
pred_list.append(pred_dict[pair[0]])
true_list.append(true_dict[pair[0]])
xl -= 1
if xl == 0: break
if len(pred_list) > 1:
errors['mae'] = round(mean_absolute_error(true_list, pred_list), 4)
errors['mse'] = round(mean_squared_error(true_list, pred_list), 4)
errors['rmse'] = round(sqrt(errors['mse']), 4)
errors['pearsonr'] = round(pearsonr(true_list, pred_list)[0], 4)
errors['count-pred'] = len(pred_list)
errors['count-true'] = len(true_dict)
return errors
def calc_dist_errors_various_xl(P, Y, L, separation=[12, 24]):
all_metrics = {}
dist_thres = ['8', '12', '15', '1000']
topxl = {5: 'Top-L/5', 2: 'Top-L/2', 1: 'Top-L ', 0.000001: 'ALL '}
pred_cutoffs = [15.0] # This is taken from the LDDT's R value
for pt in pred_cutoffs:
for dt in dist_thres:
for sep in separation:
for xl in topxl.keys():
results = calc_dist_errors(P=P, Y=Y, L=L, dist_thres=int(
dt), min_sep=int(sep), top_l_by_x=xl, pred_limit=pt)
all_metrics["prediction-cut-off:" + str(
pt) + " native-thres:" + dt + " min-seq-sep:" + str(sep) + " xL:" + topxl[xl]] = results
return all_metrics
def calc_contact_errors_various_xl(CPRED, CTRUE, separation=[12, 24]):
all_metrics = {}
topxl = {'L/5': 'Top-L/5', 'L/2': 'Top-L/2',
'L': 'Top-L ', 'NC': 'Top-NC '}
for sep in separation:
for xl in topxl.keys():
results = calculate_contact_precision(
CPRED=CPRED, CTRUE=CTRUE, minsep=sep, topxl=xl)
all_metrics["min-seq-sep:" + \
str(sep) + " xL:" + topxl[xl]] = results
return all_metrics
def rr2dmap(filerr):
f = open(filerr)
lines = f.readlines()
f.close()
# Detect RMODE (if present)
mode = 0
for l in lines:
        if l.startswith('RMODE 1'):
            mode = 1
            break
        if l.startswith('RMODE 2'):
            mode = 2
            break
pindex = 4
if mode == 1 or mode == 2:
pindex = 2
# Detect target ID (if present)
tgt = ''
for l in lines:
if l.startswith('TARGET'):
cols = l.strip().split()
tgt = cols[1]
# Extract sequence
seq = ''
for l in lines:
if l.startswith('PFRMAT '): continue
if l.startswith('TARGET '): continue
if l.startswith('AUTHOR '): continue
if l.startswith('REMARK '): continue
if l.startswith('METHOD '): continue
if l.startswith('RMODE '): continue
if l.startswith('MODEL '): continue
if l.startswith('END'): continue
if l[0].isalpha():
seq += l.strip()
# Try to download if it is a CASP14 target
if mode != 0 and len(seq) < 1:
os.system('wget -O ' + tgt + \
'.fasta --content-disposition "http://predictioncenter.org/casp14/target.cgi?target=' + tgt + '&view=sequence\"')
f = open(tgt + '.fasta')
f.readline()
seq = f.readline().strip()
f.close()
L = len(seq)
# Absent values are NaNs
C = np.full((L, L), np.nan)
D = None
for l in lines:
if not l[0].isdigit(): continue
c = l.split()
C[int(c[0]) - 1, int(c[1]) - 1] = float(c[pindex])
# D[int(c[0]) - 1, int(c[1]) - 1] = 4.0 / (float(c[pindex]) + 0.01)
if mode == 2:
# Absent values are NaNs
D = np.full((L, L), np.nan)
for l in lines:
if not l[0].isdigit():
continue
c = l.strip().split()
i = int(c[0]) - 1
j = int(c[1]) - 1
max_prob_value = 0.0
max_prob_index = -1
if len(c) != 13:
print('ERROR! Unexpected number of columns in line:', l)
sys.exit()
# identify the maximum probability
for position in range(2, len(c)):
if float(c[position]) > max_prob_value:
max_prob_value = float(c[position])
max_prob_index = position
d = 3.0
for position in range(2, len(c)):
d += 2.0
if max_prob_index == position:
break
D[i, j] = d
D = np.clip(D, 0.0, 100.0)
return (D, C, seq)
def calculate_contact_precision(CPRED, CTRUE, minsep, topxl, LPDB=None):
errors = {}
errors['precision'] = np.nan
errors['count'] = np.nan
L = len(CPRED)
if LPDB is None: LPDB = len(np.where(~np.isnan(np.diagonal(CTRUE)))[0])
# The number of valid true values must be <= predicted
num_true = 0
for j in range(0, L):
for k in range(j, L):
try:
CTRUE[j, k]
except IndexError:
continue
if np.isnan(CTRUE[j, k]): continue
if abs(j - k) < minsep: continue
if CTRUE[j, k] > 1.0 or CTRUE[j, k] < 0.0: print(
"WARNING!! True contact at "+str(j)+" "+str(k)+" is "+str(CTRUE[j, k]))
num_true += 1
num_pred = 0
for j in range(0, L):
for k in range(j, L):
if np.isnan(CPRED[j, k]): continue
if abs(j - k) < minsep: continue
if CPRED[j, k] > 1.0 or CPRED[j, k] < 0.0: print(
"WARNING!! Predicted probability at "+str(j)+" "+str(k)+" is "+str(CPRED[j, k]))
num_pred += 1
if num_true < 1:
return errors
# Put predictions in a dictionary so they can be sorted
p_dict = {}
for j in range(0, L):
for k in range(j, L):
try:
CTRUE[j, k]
except IndexError:
continue
if np.isnan(CTRUE[j, k]): continue
if np.isnan(CPRED[j, k]): continue
if abs(j - k) < minsep: continue
p_dict[(j, k)] = CPRED[j, k]
# Obtain nc, the total number of contacts in the PDB
nc_count = 0
for j in range(0, L):
for k in range(j, L):
try:
CTRUE[j, k]
except IndexError:
continue
if np.isnan(CTRUE[j, k]): continue
if abs(j - k) < minsep: continue
if CTRUE[j, k] != 1: continue
nc_count += 1
if nc_count < 1:
return errors
# Obtain top xL predictions
xl = nc_count
if topxl == 'L/5': xl = round(0.2 * LPDB) # round() NOT int()
if topxl == 'L/2': xl = round(0.5 * LPDB) # round() NOT int()
if topxl == 'L': xl = LPDB
# This should actually be implemented, but sadly CASP does not do it
# if xl > nc_count: xl = nc_count
pred_list = []
true_list = []
for pair in reversed(sorted(p_dict.items(), key=lambda x: x[1])):
if np.isnan(CTRUE[pair[0][0], pair[0][0]]): continue
pred_list.append(1) # This is assumed to be a +ve prediction
true_list.append(CTRUE[pair[0][0], pair[0][1]])
xl -= 1
if xl == 0: break
errors['precision'] = round(precision_score(true_list, pred_list), 5)
errors['count'] = len(true_list)
return errors
def dmap2rr(P, seq, file_rr, minsep, threshold):
f = open(file_rr, 'w')
f.write(seq + '\n')
for j in range(0, len(P)):
for k in range(j, len(P)):
if abs(j - k) < minsep: continue
if P[j][k] > threshold: continue
(dmin, dmax) = dmin_dmax_for_d(P[j][k])
f.write("%d %d %0.2f %.2f 1.0\n" % (j+1, k+1, dmin, dmax))
f.close()
def evaltm(TM, pred, native):
os.system(TM + " " + pred + " " + native + \
" | grep -e RMSD\\ of -e TM-score\\ \\ \\ -e MaxSub-score -e GDT-TS-score -e GDT-HA-score > x.tmp")
f = open('x.tmp')
lines = f.readlines()
f.close()
os.system('rm x.tmp')
rmsd = None
tmsc = None
gdts = None
    for l in lines:
        if l.startswith('RMSD'): rmsd = float(re.sub(r"[^\d\.]", "", l)[:5])
    for l in lines:
        if l.startswith('TM-score'): tmsc = float(re.sub(r"[^\d\.]", "", l)[:5])
    for l in lines:
        if l.startswith('GDT-TS'): gdts = float(re.sub(r"[^\d\.]", "", l)[:5])
return (rmsd, tmsc, gdts)
# Helpers for metrics calculated using numpy scheme
def get_flattened(dmap):
if dmap.ndim == 1:
return dmap
elif dmap.ndim == 2:
return dmap[np.triu_indices_from(dmap, k=1)]
else:
        assert False, "ERROR: the passed array has dimension not equal to 2 or 1!"
def get_separations(dmap):
t_indices = np.triu_indices_from(dmap, k=1)
separations = np.abs(t_indices[0] - t_indices[1])
return separations
# return a 1D boolean array indicating where the sequence separation in the
# upper triangle meets the threshold comparison
def get_sep_thresh_b_indices(dmap, thresh, comparator):
assert comparator in {'gt', 'lt', 'ge',
'le'}, "ERROR: Unknown comparator for thresholding!"
separations = get_separations(dmap)
if comparator == 'gt':
threshed = separations > thresh
elif comparator == 'lt':
threshed = separations < thresh
elif comparator == 'ge':
threshed = separations >= thresh
elif comparator == 'le':
threshed = separations <= thresh
return threshed
# return a 1D boolean array indicating where the distance in the
# upper triangle meets the threshold comparison
def get_dist_thresh_b_indices(dmap, thresh, comparator):
assert comparator in {'gt', 'lt', 'ge',
'le'}, "ERROR: Unknown comparator for thresholding!"
dmap_flat = get_flattened(dmap)
if comparator == 'gt':
threshed = dmap_flat > thresh
elif comparator == 'lt':
threshed = dmap_flat < thresh
elif comparator == 'ge':
threshed = dmap_flat >= thresh
elif comparator == 'le':
threshed = dmap_flat <= thresh
return threshed
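# Hedged example of the helpers above on a tiny 3x3 map:
#   get_flattened(np.arange(9).reshape(3, 3))  # -> array([1, 2, 5])
#   get_separations(np.zeros((3, 3)))          # -> array([1, 2, 1])
# i.e. the upper triangle (k=1) in row-major order, and |i - j| per pair.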
# Calculate lDDT using numpy scheme
def get_LDDT(true_map, pred_map, R=15, sep_thresh=-1, T_set=[0.5, 1, 2, 4], precision=4):
'''
Mariani V, Biasini M, Barbato A, Schwede T.
lDDT: a local superposition-free score for comparing protein structures and models using distance difference tests.
Bioinformatics. 2013 Nov 1;29(21):2722-8.
doi: 10.1093/bioinformatics/btt473.
Epub 2013 Aug 27.
PMID: 23986568; PMCID: PMC3799472.
'''
# Helper for number preserved in a threshold
def get_n_preserved(ref_flat, mod_flat, thresh):
err = np.abs(ref_flat - mod_flat)
n_preserved = (err < thresh).sum()
return n_preserved
# flatten upper triangles
true_flat_map = get_flattened(true_map)
pred_flat_map = get_flattened(pred_map)
# Find set L
S_thresh_indices = get_sep_thresh_b_indices(true_map, sep_thresh, 'gt')
R_thresh_indices = get_dist_thresh_b_indices(true_flat_map, R, 'lt')
L_indices = S_thresh_indices & R_thresh_indices
true_flat_in_L = true_flat_map[L_indices]
pred_flat_in_L = pred_flat_map[L_indices]
# Number of pairs in L
L_n = L_indices.sum()
# Calculated lDDT
preserved_fractions = []
for _thresh in T_set:
_n_preserved = get_n_preserved(true_flat_in_L, pred_flat_in_L, _thresh)
_f_preserved = _n_preserved / L_n
preserved_fractions.append(_f_preserved)
lDDT = np.mean(preserved_fractions)
if precision > 0:
lDDT = round(lDDT, precision)
return lDDT
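# Hedged sanity check (synthetic data): a map scored against itself preserves
# every distance at every threshold, so its lDDT must be exactly 1.0.
#
#   _demo = np.random.rand(50, 50) * 20.0
#   assert get_LDDT(_demo, _demo) == 1.0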
def disteval_main(native=None,
file_fasta=None,
dmap=None,
trrosetta=None,
deepdist=None,
inputrr=None,
ss=None,
truedmap=None,
modeling3d=None,
threshold=None,
job_dir=None,
minsep=None,
basename=None,
native_basename=None):
TM = os.path.dirname(os.path.abspath(__file__)) + '/TMscore'
DISTFOLD = os.path.dirname(os.path.abspath(__file__)) + '/distfold.pl'
if sys.version_info < (3, 0, 0):
print('Python 3 required!!!')
sys.exit(1)
# Prepare the native
l_for_xL = None
ND = None
NC = None
rnum_rnames = None
if native:
print('')
print('Load PDB..')
(l_for_xL, ND, rnum_rnames) = pdb2dmap(native)
print('True dmap: ', ND.shape)
print('Total valid residues:', l_for_xL)
NC = np.copy(ND)
NC[NC < 8.0] = 1
NC[NC >= 8.0] = 0
# Prepare the predicted distances and contacts
D = None
C = None
print('')
if trrosetta is not None:
print('Load the input trRosetta prediction..')
(D, C) = trrosetta2maps(trrosetta)
L = len(D)
if ND is not None and L != len(ND):
print('PDB is smaller! Trimming prediction..', L, len(ND))
D = D[:len(ND), :len(ND)]
C = C[:len(ND), :len(ND)]
elif deepdist:
        print('Obtaining a distance map from the input TXT..')
D = np.loadtxt(deepdist)
L = len(D)
if ND is not None and L != len(ND):
print('PDB is smaller! Trimming prediction..', L, len(ND))
D = D[:len(ND), :len(ND)]
C = 4.0 / (D + 0.001)
elif dmap is not None:
print('Load the input 2D distance map..')
D = np.load(dmap)
L = len(D)
if D.ndim == 3:
print('Reshaping needed here..')
D = D.reshape((L, L))
if ND is not None and L != len(ND):
print('PDB is smaller! Trimming prediction..', L, len(ND))
D = D[:len(ND), :len(ND)]
C = 4.0 / (D + 0.001)
elif truedmap:
print('Obtaining a true distance map from the input PDB..')
D = np.copy(ND)
C = 4.0 / (D + 0.001)
elif inputrr:
print('Obtaining a contact map from the input RR..')
(D, C, seq) = rr2dmap(inputrr)
print(seq)
        if np.isnan(C).all(): sys.exit('ERROR! C is all NaNs!')
        if D is not None and np.isnan(D).all(): sys.exit('ERROR! D is all NaNs!')
else:
sys.exit('ERROR!! No input provided!!')
# Check for NaNs
print('')
if C is not None:
print('C.shape', C.shape)
print('Contact nans:', np.count_nonzero(
np.isnan(C)), 'of', str(len(C) * len(C)))
if D is not None:
print('D.shape', D.shape)
print('Distance nans:', np.count_nonzero(
np.isnan(D)), 'of', str(len(D) * len(D)))
if C is None and D is None:
sys.exit('ERROR!! Could not load contact or distance!')
# Evaluate distances
if D is not None:
print('')
print('Evaluating distances..')
all_metrics = calc_dist_errors_various_xl(
P=D, Y=ND, L=l_for_xL) # , separation = [minsep])
for k in all_metrics:
print(basename, native_basename, k, all_metrics[k])
# Evaluate contacts
if C is not None:
print('')
print('Evaluating contacts..')
all_metrics = calc_contact_errors_various_xl(
CPRED=C, CTRUE=NC) # , separation = [minsep])
for k in all_metrics:
print(basename, native_basename, k, all_metrics[k])
# Find and print lDDT scores if ND and D provided:
if (ND is not None) and (D is not None):
LDDT_dict = {}
for S in [0, 6, 12, 24]:
for R in [15]:
LDDT_dict[f"Radius:{R} min-seq-sep:{S}"] = get_LDDT(
ND, D, R, S)
print('')
print("Cb-distance map LDDT scores")
for LDDT_k, LDDT_v in LDDT_dict.items():
print(basename, native_basename, LDDT_k, " Cb-LDDT: ", LDDT_v)
if not modeling3d:
return 0
if file_fasta is None:
print('ERROR!! Fasta file is needed for building 3D models')
return 1
if job_dir is None:
print('ERROR!! job_dir is needed for building 3D models')
return 2
os.system('mkdir -p ' + job_dir)
file_rr = job_dir + '/x.rr'
if inputrr is None:
seq = seqfasta(file_fasta)
        dmap2rr(D, seq, file_rr, minsep, threshold)
else:
os.system('cp ' + inputrr + ' ' + job_dir + '/x.rr')
f = open(job_dir + '/x.rr')
lines = f.readlines()
f.close()
restraint_count = 0
for l in lines:
if l[0].isdigit(): restraint_count += 1
print('')
print('Restraints (head):')
os.system('head ' + file_rr)
if restraint_count < 1:
print('ERROR!! No restraints to pass on to DISTFOLD! Exiting..')
return 3
print('')
print('Run DISTFOLD')
ssparam = ''
if ss is not None: ssparam = ' -ss ' + ss
status = os.system(
f"perl {DISTFOLD} -seq {file_fasta} -rr {file_rr} -o {job_dir} -mcount 20 -selectrr all" + ssparam)
if status != 0:
        sys.exit('ERROR!! Could not execute DISTFOLD!')
if (native is not None) and (not os.path.exists(native)):
return 0
print('')
print('Run TM-score..')
os.chdir(job_dir + '/stage1/')
tmscores = {}
for pdb in os.listdir('./'):
if not pdb.endswith('pdb'): continue
tmscores[pdb] = evaltm(TM, native, pdb)
print('')
print("TM-score RMSD GDT-TS MODEL")
for pdb in sorted(tmscores.items(), key=lambda kv: kv[1][1]):
p = pdb[0]
(r, t, g) = tmscores[pdb[0]]
print(f"{t:5.3f} {r:6.3f} {g:5.3f} {p}")
def main():
args = get_args()
print(args)
native = None
file_fasta = None
dmap = None
trrosetta = None
inputrr = None
deepdist = None
ss = None
truedmap = False
modeling3d = False
threshold = args.threshold
job_dir = args.jobdir
minsep = args.minsep
basename = ""
native_basename = ""
if args.native is not None:
native = os.path.abspath(args.native)
native_basename = os.path.basename(native)
if args.fasta is not None: file_fasta = os.path.abspath(args.fasta)
if args.dmap is not None:
dmap = os.path.abspath(args.dmap)
basename = dmap
if args.trrosetta is not None:
trrosetta = os.path.abspath(args.trrosetta)
basename = trrosetta
if args.deepdist is not None:
deepdist = os.path.abspath(args.deepdist)
basename = deepdist
if args.inputrr is not None:
inputrr = os.path.abspath(args.inputrr)
basename = inputrr
if args.ss is not None: ss = os.path.abspath(args.ss)
if args.truedmap is True:
truedmap = True
basename = native
if args.modeling3d is True: modeling3d = True
disteval_main(native=native,
file_fasta=file_fasta,
dmap=dmap,
trrosetta=trrosetta, deepdist=deepdist,
inputrr=inputrr,
ss=ss,
truedmap=truedmap,
modeling3d=modeling3d,
threshold=threshold,
job_dir=job_dir,
minsep=minsep,
basename=basename,
native_basename=native_basename)
if __name__ == "__main__": main() |
the-stack_0_23853 | #!/usr/bin/env python
"""
synopsis:
Custom routing Router to Mama (ROUTER to REQ)
Author: Jeremy Avnet (brainsik) <spork(dash)zmq(at)theory(dot)org>
Modified for tornado/ioloop: Dave Kuhlman <dkuhlman(at)davekuhlman(dot)org>
usage:
python rtreq.py
"""
import sys
import random
import zmq
from functools import partial
from zmq.eventloop.future import Context
from zmq.eventloop.ioloop import IOLoop
from tornado import gen
import zhelpers
NBR_WORKERS = 10
@gen.coroutine
def worker_task(id, context=None):
context = context or Context.instance()
worker = context.socket(zmq.REQ)
# We use a string identity for ease here
zhelpers.set_id(worker)
worker.connect("tcp://localhost:5671")
total = 0
while True:
# Tell the router we're ready for work
yield worker.send(b"ready")
# Get workload from router, until finished
workload = yield worker.recv()
#print('(worker {}) received: {}'.format(id, workload))
finished = workload == b"END"
if finished:
print("worker %d processed: %d tasks" % (id, total))
break
total += 1
# Do some random work
yield gen.sleep(0.1 * random.random())
raise gen.Return(('worker {}'.format(id), total))
@gen.coroutine
def requestor(client):
for _ in range(NBR_WORKERS * 10):
# LRU worker is next waiting in the queue
address, empty, ready = yield client.recv_multipart()
yield client.send_multipart([
address,
b'',
b'This is the workload',
])
# Now ask mama to shut down and report their results
for _ in range(NBR_WORKERS):
address, empty, ready = yield client.recv_multipart()
yield client.send_multipart([
address,
b'',
b'END',
])
raise gen.Return(('requestor', 'finished'))
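# Hedged note on the framing handled above: a ROUTER socket prepends the
# peer's identity frame, so each REQ message arrives as
# [identity, b'', payload], and every reply must echo the identity and the
# empty delimiter back, exactly as send_multipart() does in requestor().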
@gen.coroutine
def run(loop):
context = Context.instance()
client = context.socket(zmq.ROUTER)
client.bind("tcp://*:5671")
responses = yield [
worker_task(idx) for idx in range(NBR_WORKERS)
] + [requestor(client)]
print('responses: {}'.format(responses))
def main():
args = sys.argv[1:]
if len(args) != 0:
sys.exit(__doc__)
try:
loop = IOLoop.current()
loop.run_sync(partial(run, loop))
print('(main) exiting')
except KeyboardInterrupt:
print('\nFinished (interrupted)')
sys.exit(0)
if __name__ == '__main__':
main()
print('(program) finished')
|
the-stack_0_23854 | import os
import numpy as np
import cv2
import random
import shutil
import xml.etree.ElementTree as ET
def copy_and_rename_xml(original_path, new_path, reference_image_name):
    shutil.copy(original_path, new_path)
    # update the <filename> element so the XML matches the new image name
tree = ET.parse(new_path)
root = tree.getroot()
for fileNameXML in root.iter('filename'):
fileNameXML.text = reference_image_name
tree.write(new_path)
def adjust_gamma(image, gamma=1.0):
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
inv_gamma = 1.0 / gamma
table = np.array([((i / 255.0) ** inv_gamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
def generate_gamma_corrected_images(source_path, destiny_path):
files = [file for file in os.listdir(source_path) if file.endswith(".jpg") or file.endswith(".jpeg")]
for file in files:
random_gamma = random.uniform(1.0, 4.0)
original_img = cv2.imread(os.path.join(source_path, file))
output = adjust_gamma(original_img, random_gamma)
filename, file_extension = os.path.splitext(file)
new_image_file_name = filename + "_light" + file_extension
cv2.imwrite(os.path.join(destiny_path, new_image_file_name), output)
xml_file_name = file.replace(".jpeg", ".xml").replace(".jpg", ".xml")
new_xml_file_name = new_image_file_name.replace(".jpeg", ".xml").replace(".jpg", ".xml")
# write XML
xml_path = os.path.join(source_path, "annotations", xml_file_name)
if not os.path.exists(os.path.join(destiny_path, "annotations")):
os.makedirs(os.path.join(destiny_path, "annotations"))
new_xml_path = os.path.join(os.path.join(destiny_path, "annotations"), new_xml_file_name)
copy_and_rename_xml(xml_path, new_xml_path,
new_image_file_name)
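# Hedged usage sketch (both paths are hypothetical): write one gamma-shifted
# copy of every labelled image plus a matching, renamed Pascal-VOC XML.
if __name__ == '__main__':
    generate_gamma_corrected_images('data/train', 'data/train_light')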
|
the-stack_0_23856 | from typing import Any
from hw.maria_saganovich.lesson6_hw.func_decorator import my_func_decorator
@my_func_decorator
def func9_swap_keys_values(arg: Any) -> dict:
    """Swap keys and values of a dict; keys that share a value are grouped into a list."""
    result: dict = {}
    assert isinstance(arg, dict), ["Invalid arg"]
    for key in arg:  # iterate keys directly; the enumerate index was unused
if isinstance(arg[key], (list, dict, set, frozenset)):
raise Exception(["Unhashable arg type"])
if arg[key] in result:
result[arg[key]] = [result[arg[key]], key]
else:
result[arg[key]] = key
return result
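# Hedged usage sketch (assumes my_func_decorator is a plain pass-through wrapper):
#   func9_swap_keys_values({'a': 1, 'b': 2, 'c': 1})  # -> {1: ['a', 'c'], 2: 'b'}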
|
the-stack_0_23859 | #!/usr/bin/env python3
import sys
import itertools
from collections import namedtuple
Dim = namedtuple('Dim', 'name')
Col = namedtuple('Col', 'type name dim dim_offset dim_factor')
Database = namedtuple('Database', 'name dim_list col_list')
Schema = namedtuple('Schema', 'database_list namespace include_guard include_list')
def generate_database_code(database, convertible_from):
def format_offset(o):
if o < 0:
return " - "+str(-o)
elif o > 0:
return " + "+str(o)
else:
return ""
def push(s):
lines.extend(s.split("\n"))
    push____ = push
def push_gbl(s):
push____(s.format(
N=database.name,
E="explicit " if len(database.dim_list)==1 else ""
))
def push_col(s):
for col in database.col_list:
push(s.format(
N=database.name,
T=col.type,
D=col.dim,
C=col.name,
P=format_offset(col.dim_offset),
M=format_offset(-col.dim_offset),
F="" if col.dim_factor == 1 else str(col.dim_factor)+"*"
))
def push_dim(s):
for dim in database.dim_list:
push(s.format(
N=database.name,
D=dim.name
))
def remove_last_chars(n):
lines[-1] = lines[-1][:-n]
def replace_last_chars(n, s):
lines[-1] = lines[-1][:-n]+s
intro_lines = []
main_lines = []
finish_lines = []
lines = intro_lines
push_gbl("struct Ref{N};")
push_gbl("struct ConstRef{N};")
lines = main_lines
push____("")
push_gbl("struct Ref{N}{{")
push_dim("\tsize_t {D};")
push_col("\t{T}* __restrict__ {C}; // size = {F}{D}{P}")
push____("")
push_col("\tSpan<{T}> {C}_as_ref()noexcept;")
push_col("\tSpan<const {T}> {C}_as_ref()const noexcept;")
push_col("\tSpan<const {T}> {C}_as_cref()const noexcept;")
lines = finish_lines
push____("")
push_col("inline Span<{T}> Ref{N}::{C}_as_ref()noexcept{{\n\treturn {{{C}, {C}+{F}{D}{P}}};\n}}\n")
push_col("inline Span<const {T}> Ref{N}::{C}_as_ref()const noexcept{{\n\treturn {C}_as_cref();\n}}\n")
push_col("inline Span<const {T}> Ref{N}::{C}_as_cref()const noexcept{{\n\treturn {{{C}, {C}+{F}{D}{P}}};\n}}\n")
lines = main_lines
push____("")
push_gbl("\tRef{N}();")
for other_database in convertible_from:
lines = main_lines
push("\tRef"+database.name+"(Ref"+other_database.name+");")
lines = finish_lines
push____("")
push____("inline Ref"+database.name+"::Ref"+database.name+"(Ref"+other_database.name+" o):")
push_dim("\t{D}(o.{D}),")
push_col("\t{C}(o.{C}),")
replace_last_chars(1, "{}")
lines = main_lines
push_gbl("\tRef{N}(")
push_dim("\t\tsize_t {D},")
push_col("\t\t{T}* __restrict__ {C},")
remove_last_chars(1)
push____("\t);")
lines = finish_lines
push____("")
push_gbl("inline Ref{N}::Ref{N}(){{}}")
push____("")
push_gbl("inline Ref{N}::Ref{N}(")
push_dim("\tsize_t {D},")
push_col("\t{T}* __restrict__ {C},")
remove_last_chars(1)
push____("):")
push_dim("\t{D}({D}),")
push_col("\t{C}({C}),")
replace_last_chars(1, "{}")
lines = main_lines
push____("};")
push____("")
push_gbl("struct ConstRef{N}{{")
push_dim("\tsize_t {D};")
push_col("\tconst {T}* __restrict__ {C}; // size = {F}{D}{P}")
push____("")
push_col("\tSpan<const {T}> {C}_as_ref()const noexcept;")
push_col("\tSpan<const {T}> {C}_as_cref()const noexcept;")
lines = finish_lines
push____("")
push_col("inline Span<const {T}> ConstRef{N}::{C}_as_ref()const noexcept{{\n\treturn {C}_as_cref();\n}}\n")
push_col("inline Span<const {T}> ConstRef{N}::{C}_as_cref()const noexcept{{\n\treturn {{{C}, {C}+{F}{D}{P}}};\n}}\n")
lines = main_lines
push____("")
push_gbl("\tConstRef{N}();")
push_gbl("\tConstRef{N}(Ref{N});")
for other_database in convertible_from:
lines = main_lines
push("\tConstRef"+database.name+"(Ref"+other_database.name+");")
push("\tConstRef"+database.name+"(ConstRef"+other_database.name+");")
lines = finish_lines
push____("")
push____("inline ConstRef"+database.name+"::ConstRef"+database.name+"(Ref"+other_database.name+" o):")
push_dim("\t{D}(o.{D}),")
push_col("\t{C}(o.{C}),")
replace_last_chars(1, "{}")
push____("")
push____("inline ConstRef"+database.name+"::ConstRef"+database.name+"(ConstRef"+other_database.name+" o):")
push_dim("\t{D}(o.{D}),")
push_col("\t{C}(o.{C}),")
replace_last_chars(1, "{}")
lines = main_lines
push_gbl("\tConstRef{N}(")
push_dim("\t\tsize_t {D},")
push_col("\t\tconst {T}* __restrict__ {C},")
remove_last_chars(1)
push____("\t);")
lines = finish_lines
push____("")
push_gbl("inline ConstRef{N}::ConstRef{N}() {{}}")
push____("")
push_gbl("inline ConstRef{N}::ConstRef{N}(Ref{N} o):")
push_dim("\t{D}(o.{D}),")
push_col("\t{C}(o.{C}),")
replace_last_chars(1, "{}")
push____("")
push_gbl("inline ConstRef{N}::ConstRef{N}(")
push_dim("\tsize_t {D},")
push_col("\tconst {T}* __restrict__ {C},")
remove_last_chars(1)
push____("):")
push_dim("\t{D}({D}),")
push_col("\t{C}({C}),")
replace_last_chars(1, "{}")
lines = main_lines
push____("};")
push____("")
push_gbl("struct Vec{N}{{")
push_dim("\tsize_t {D};")
push_col("\tstd::vector<{T}>{C};")
push____("")
push_gbl("\tVec{N}();")
push_gbl("\t{E}Vec{N}(")
push_dim("\t\tsize_t {D},")
remove_last_chars(1)
push____("\t);")
push_gbl("\tvoid resize(")
push_dim("\t\tsize_t {D},")
remove_last_chars(1)
push____("\t);")
push____("\tvoid throw_if_wrong_size()const;")
push____("\tvoid assert_correct_size()const noexcept;")
push____("\tvoid shrink_to_fit();")
push_gbl("\tConstRef{N} as_ref()const noexcept;")
push_gbl("\tRef{N} as_ref()noexcept;")
push_gbl("\tConstRef{N} as_cref()const noexcept;")
push____("")
push_col("\tSpan<{T}> {C}_as_ref()noexcept;")
push_col("\tSpan<const {T}> {C}_as_ref()const noexcept;")
push_col("\tSpan<const {T}> {C}_as_cref()const noexcept;")
lines = finish_lines
push____("")
push_col("inline Span<{T}> Vec{N}::{C}_as_ref()noexcept{{\n\treturn {{&{C}[0], &{C}[0]+{C}.size()}};\n}}\n")
push_col("inline Span<const {T}> Vec{N}::{C}_as_ref()const noexcept{{\n\treturn {C}_as_cref();\n}}\n")
push_col("inline Span<const {T}> Vec{N}::{C}_as_cref()const noexcept{{\n\treturn {{{C}.data(), {C}.data()+{C}.size()}};\n}}\n")
lines = main_lines
push____("};")
push____("")
lines = finish_lines
push____("")
push_gbl("inline Vec{N}::Vec{N}(){{}}")
push____("")
push_gbl("inline Vec{N}::Vec{N}(")
push_dim("\tsize_t {D},")
remove_last_chars(1)
push____("):")
push_dim("\t{D}({D}),")
push_col("\t{C}({F}{D}{P}),")
replace_last_chars(1, "{}")
push____("")
push_gbl("inline void Vec{N}::resize(")
push_dim("\tsize_t {D},")
remove_last_chars(1)
push____("){")
push_dim("\tthis->{D} = {D};")
push_col("\t{C}.resize({F}{D}{P});")
push____("}")
push____("")
push_gbl("inline void Vec{N}::throw_if_wrong_size()const{{")
push____("\tstd::string err;")
push_col("\tif({C}.size() != {F}{D}{P})\n\t\t err += (\"Column {C} has wrong size. Expected: \"+std::to_string({F}{D}{P})+\" Actual: \"+std::to_string({C}.size())+\"\\n\");")
push____("\tif(!err.empty())")
push____("\t\tthrow std::runtime_error(err);")
push____("}")
push____("")
push_gbl("inline void Vec{N}::assert_correct_size()const noexcept{{")
push_col("\tassert({C}.size() == {F}{D}{P});")
push____("}")
push____("")
push_gbl("inline void Vec{N}::shrink_to_fit(){{")
push_col("\t{C}.shrink_to_fit();")
push____("}")
push____("")
push_gbl("inline ConstRef{N} Vec{N}::as_ref()const noexcept{{")
push____("\treturn as_cref();")
push____("}")
push____("")
push_gbl("inline Ref{N} Vec{N}::as_ref()noexcept{{")
push____("\treturn {")
push_dim("\t\t{D},")
push_col("\t\t&{C}[0],")
remove_last_chars(1)
push____("\t};")
push____("}")
push____("")
push_gbl("inline ConstRef{N} Vec{N}::as_cref()const noexcept{{")
push____("\treturn {")
push_dim("\t\t{D},")
push_col("\t\t{C}.data(),")
remove_last_chars(1)
push____("\t};")
push____("}")
lines = main_lines
push_gbl("struct Dir{N}{{")
push_dim("\tsize_t {D};")
push_col("\tFileArray<{T}>{C};")
push____("")
push_gbl("\tDir{N}();")
push_gbl("\texplicit Dir{N}(std::string dir);")
lines = finish_lines
push____("")
push_gbl("inline Dir{N}::Dir{N}(){{}}")
push____("")
push_gbl("inline Dir{N}::Dir{N}(std::string dir){{")
push____("\tappend_dir_slash_if_needed(dir);")
push_dim("\t{{\n\t\tFileDataSource src(dir+\"{D}\");\n\t\tread_full_buffer_from_data_source(src.as_ref(), (uint8_t*)&{D}, sizeof(size_t));\n\t}}")
push_col("\t{C} = FileArray<{T}>(dir+\"{C}\");")
push____("\tthrow_if_wrong_col_size();")
push____("}")
lines = main_lines
push____("\tvoid open(std::string dir);")
push____("\tvoid close();")
push____("\tvoid throw_if_wrong_col_size()const;")
lines = finish_lines
push____("")
push_gbl("inline void Dir{N}::open(std::string dir){{")
push_gbl("\t*this = Dir{N}(std::move(dir));")
push____("}")
push____("")
push_gbl("inline void Dir{N}::close(){{")
push_gbl("\t*this = Dir{N}();")
push____("}")
push____("")
push_gbl("inline void Dir{N}::throw_if_wrong_col_size()const{{")
push____("\tstd::string err;")
push_col("\tif({C}.size() != {F}{D}{P})\n\t\t err += (\"Column {C} has wrong size. Expected: \"+std::to_string({F}{D}{P})+\" Actual: \"+std::to_string({C}.size())+\"\\n\");")
push____("\tif(!err.empty())")
push____("\t\tthrow std::runtime_error(err);")
push____("}")
lines = main_lines
push_gbl("\tConstRef{N} as_cref()const noexcept;")
push_gbl("\tConstRef{N} as_ref()const noexcept;")
lines = finish_lines
push____("")
push_gbl("inline ConstRef{N} Dir{N}::as_cref()const noexcept{{")
push____("\treturn {")
push_dim("\t\t{D},")
push_col("\t\t{C}.data(),")
remove_last_chars(1)
push____("\t};")
push____("}")
push____("")
push_gbl("inline ConstRef{N} Dir{N}::as_ref()const noexcept{{")
push____("\treturn as_cref();")
push____("}")
lines = main_lines
push____("")
push_col("\tSpan<const {T}> {C}_as_ref()const noexcept;")
push_col("\tSpan<const {T}> {C}_as_cref()const noexcept;")
lines = finish_lines
push____("")
push_col("inline Span<const {T}> Dir{N}::{C}_as_ref()const noexcept{{\n\treturn {C}_as_cref();\n}}\n")
push_col("inline Span<const {T}> Dir{N}::{C}_as_cref()const noexcept{{\n\treturn {{{C}.data(), {C}.data()+{C}.size()}};\n}}\n")
lines = main_lines
push____("};")
push____("")
push_gbl("inline void dump_into_dir(std::string dir, Ref{N} data);")
push_gbl("inline void dump_into_dir(std::string dir, ConstRef{N} data);")
lines = finish_lines
push____("")
push_gbl("inline void dump_into_dir(std::string dir, Ref{N} data){{")
push_gbl("\tdump_into_dir(std::move(dir), ConstRef{N}(data));")
push____("}")
push____("")
push_gbl("inline void dump_into_dir(std::string dir, ConstRef{N} data){{")
push_gbl("\tappend_dir_slash_if_needed(dir);")
push_dim("\tFileDataSink(dir+\"{D}\")((const uint8_t*)&data.{D}, sizeof(size_t));");
push_col("\tFileDataSink(dir+\"{C}\")((const uint8_t*)data.{C}, ({F}data.{D}{P})*sizeof({T}));");
push____("}")
return intro_lines, main_lines, finish_lines
def parse_schema(in_stream):
database_list = []
namespace = None
include_guard = None
include_list = []
database_name = None
col_list = []
dim_list = []
dim_set = set()
def database_finish():
database_list.append(
Database(
name = database_name,
dim_list = dim_list,
col_list = col_list
)
)
for line in in_stream:
line = line.strip()
if len(line) == 0:
continue
if line[0] == '=':
if database_name != None:
database_finish()
database_name = line.strip("= \t")
col_list = []
dim_list = []
dim_set = set()
elif line[0] == '!':
cmd = [x for x in line[1:].strip().split(" ") if x!=""]
if cmd[0] == "namespace":
namespace = cmd[1]
elif cmd[0] == "include_guard":
include_guard = cmd[1]
elif cmd[0] == "include":
include_list.append(cmd[1])
else:
sep1 = line.find("@")
sep2 = line.rfind(" ")
if sep1 == -1 or sep2 == -1 or sep2 < sep1:
dim_name = line.strip()
if not dim_name in dim_set:
dim_set.add(dim_name)
dim_list.append(Dim(name = dim_name))
else:
type_name = line[:sep1].strip()
dim_name = line[sep1+1:sep2].strip()
col_name = line[sep2+1:].strip()
dim_offset = 0
dim_factor = 1
mul = dim_name.find("*")
if mul != -1:
dim_factor = int(dim_name[:mul].strip())
dim_name = dim_name[mul+1:].strip()
m = dim_name.rfind("-")
p = dim_name.rfind("+")
if m < p and p != -1:
dim_offset = int(dim_name[p+1:].strip())
dim_name = dim_name[:p].strip()
elif p < m and m != -1:
dim_offset = -int(dim_name[m+1:].strip())
dim_name = dim_name[:m].strip()
if not dim_name in dim_set:
dim_set.add(dim_name)
dim_list.append(Dim(name = dim_name))
col_list.append(Col(type = type_name, name = col_name, dim = dim_name, dim_offset = dim_offset, dim_factor=dim_factor))
database_finish()
return Schema(
database_list = database_list, namespace = namespace,
include_list = include_list, include_guard = include_guard
)
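# Hedged example of the schema DSL parse_schema accepts (illustrative only):
#
#   ! include_guard MY_DATA_H
#   ! namespace mydata
#   = MyData =
#   node_count
#   uint32_t @ node_count + 1 first_out
#   uint32_t @ arc_count head
#
# A bare name declares a dimension; "type @ dim [+/- offset] column" declares
# a column whose length is that dimension, with an optional offset and an
# optional "factor*" prefix on the dimension.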
schema = parse_schema(sys.stdin)
def is_database_subset_of(database_a, database_b):
dim_list_set_b = set((dim.name for dim in database_b.dim_list))
for dim in database_a.dim_list:
if not dim.name in dim_list_set_b:
return False
del dim_list_set_b
col_list_set_b = set((col.name for col in database_b.col_list))
for col in database_a.col_list:
if not col.name in col_list_set_b:
return False
del col_list_set_b
return True
lines = []
if schema.include_guard != None:
lines.append("#ifndef "+schema.include_guard)
lines.append("#define "+schema.include_guard)
lines.append("")
lines.append("#include \"span.h\"")
lines.append("#include \"dir.h\"")
lines.append("#include \"file_array.h\"")
lines.append("#include \"data_sink.h\"")
lines.append("#include \"data_source.h\"")
for include in schema.include_list:
lines.append("#include "+include)
lines.append("#include <vector>")
lines.append("#include <string>")
lines.append("#include <stdexcept>")
lines.append("#include <stdlib.h>")
lines.append("#include <stdint.h>")
lines.append("#include <assert.h>")
lines.append("")
if schema.namespace != None:
lines.append("namespace "+schema.namespace+" {")
lines.append("")
def breakup_into_max_len_lines(s, max):
lines = ["//"]
for word in s.replace("\n", " ").split(" "):
if len(word) != 0:
if len(lines[-1]) + len(word) + 1 <= max:
lines[-1] = lines[-1] + " " + word
else:
lines.append("// "+word)
return lines
# lines.extend(breakup_into_max_len_lines("""
# This header defines types that represent databases of POD types.
# Every databases consists of a fixed number of columns and dimensions.
# A column is an array of a POD type.
# The length of the array is one of the dimensions plus possibly a constant offset.
# All columns of a database with the same dimension can be viewed as being part of a database.
# Suppose a database is called MyData. In this case, there are four types:
# RefMyData,
# ConstRefMyData,
# VecMyData,
# DirMyData.
# The first two types are structs of sizes and pointers.
# They are PODs.
# They do not own the referenced data and do not cleanup the memory upon destruction.
# MyDataRef can mutate the data in the rows.
# MyDataConstRef references read-only data.
# DynMyData is a struct of vectors.
# For every column there is a vector.
# You can construct a MyDataRef or a MyDataConstRef by calling as_ref or as_cref.
# DirMyData represent views into memory mapped read-only files.
# There is one file per column and the name of the column is the name of the file.
# Files can be written into a directory using the dump_to_dir function.
# """, 75))
lines.append("")
main_lines = []
finish_lines = []
for database in schema.database_list:
convertible_from_database = []
for other_database in schema.database_list:
if other_database != database:
if is_database_subset_of(database, other_database):
convertible_from_database.append(other_database)
intro_code, main_code, finish_code = generate_database_code(database, convertible_from=convertible_from_database)
lines.extend(intro_code)
main_lines.extend(main_code)
finish_lines.extend(finish_code)
lines.extend(main_lines)
lines.extend(finish_lines)
if schema.namespace != None:
lines.append("")
lines.append("} // namespace "+schema.namespace)
if schema.include_guard != None:
lines.append("")
lines.append("#endif")
sys.stdout.write("\n".join(lines)+"\n")
|
the-stack_0_23860 | #!/usr/bin/env python
'''
Script to parse /jetsoncar/joint_states topic and publish encoder values to /encoder/front and /encoder/rear
'''
import rospy
from std_msgs.msg import Header
from sensor_msgs.msg import JointState
from std_msgs.msg import Int32
import numpy as np
import math
import tf2_ros
class EncoderNode:
# Set publishers
pub_front = rospy.Publisher('/encoder/front', Int32, queue_size=1)
pub_rear = rospy.Publisher('/encoder/rear', Int32, queue_size=1)
pub_timestamp = rospy.Publisher('/encoder/timestamp', Int32, queue_size=1)
def __init__(self):
# Subscribe to joint_states
rospy.Subscriber('/jetsoncar/joint_states', JointState, self.joint_states_update)
def joint_states_update(self, msg):
        received_timestamp = rospy.Time.now()
# Find the index of the wheels
try:
idxFrontLeft = msg.name.index('front_left_wheel_joint')
idxFrontRight = msg.name.index('front_right_wheel_joint')
idxRearLeft = msg.name.index('rear_left_wheel_joint')
idxRearRight = msg.name.index('rear_right_wheel_joint')
except ValueError as e:
# Wait for Gazebo to startup
pass
else:
# Extract encoder angles in radians
            encFrontLeft = msg.position[idxFrontLeft]
            encFrontRight = msg.position[idxFrontRight]
            encRearLeft = msg.position[idxRearLeft]
            encRearRight = msg.position[idxRearRight]
# Convert radians into ticks using a gearing ratio of 40 and 12 ticks pr. rev
encFrontLeft = (encFrontLeft / (2*math.pi)) * (40*12)
encFrontRight = (encFrontRight / (2*math.pi)) * (40*12)
encRearLeft = (encRearLeft / (2*math.pi)) * (40*12)
encRearRight = (encRearRight / (2*math.pi)) * (40*12)
# Prepare the data for publishing
encFront = Int32()
encFront.data = (encFrontLeft + encFrontRight) / 2
encRear = Int32()
encRear.data = (encRearLeft + encRearRight) / 2
timestamp = Int32()
            timestamp.data = (received_timestamp.secs * 1000) + (received_timestamp.nsecs // 1000000)  # publish milliseconds
self.pub_front.publish(encFront)
self.pub_rear.publish(encRear)
self.pub_timestamp.publish(timestamp)
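# Hedged helper sketch: the radians-to-ticks conversion used above, factored
# out for clarity. The 40:1 gearing and 12 ticks per revolution come from the
# in-line comment and are assumptions about the hardware.
def radians_to_ticks(angle_rad, gear_ratio=40, ticks_per_rev=12):
    return (angle_rad / (2 * math.pi)) * (gear_ratio * ticks_per_rev)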
# Start the node
if __name__ == '__main__':
rospy.init_node("jetsoncar_encoder_node")
node = EncoderNode()
rospy.spin()
|
the-stack_0_23861 | from django.contrib import admin
from reversion.admin import VersionAdmin
from suit.admin import RelatedFieldAdmin, get_related_field
from import_export import resources, fields
from import_export import fields
from import_export.admin import ImportExportModelAdmin, ExportActionModelAdmin
from survey.models import LASER, Map, Research, KnowledgeTracker
from survey.forms import ResearchForm, KnowledgeTrackerForm
from .utils import has_group
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
class DisseminationMethodFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = 'Dissemination Method'
# Parameter for the filter that will be used in the URL query.
parameter_name = 'dissemination_method'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (
('Community Activity', 'Community Activity'),
('Social Media', 'Social Media'),
# ('Training', 'Training'),
('Official External Communication', 'Official External Communication'),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value():
return queryset.filter(
dissemination_method__contains=self.value()
)
return queryset
class LASERResource(resources.ModelResource):
class Meta:
model = LASER
fields = (
'id',
'laser_id',
'organization',
'focal_point_contact',
'title',
'description',
'status',
'population_targeted',
'sectors_covered',
'report_link',
'required_followup',
'published_date',
'publication_date',
'estimated_cost',
'category',
'evaluation_type',
'geographical_focus',
'UNSF_outcome',
'section'
)
export_order = fields
# @admin.register(LASER)
class LASERAdmin(ImportExportModelAdmin):
resource_class = LASERResource
list_display = ('laser_id', 'organization', 'title', 'status',
'category', 'section')
date_hierarchy = 'created'
list_filter = ('organization', 'status', 'category', 'section')
# suit_list_filter_horizontal = ('organization', 'status', 'category', 'section')
# list_select_related = True
class ResearchResource(resources.ModelResource):
class Meta:
model = Research
fields = (
'research_id',
'title',
'publication_year'
'organizations',
'researchers',
'type',
'main_sector',
'geographical_coverage',
'description',
'report_link',
'recommendations',
'planned_actions',
)
export_order = fields
# @admin.register(Research)
class ResearchAdmin(ImportExportModelAdmin, VersionAdmin):
resource_class = ResearchResource
form = ResearchForm
list_display = ('research_id', 'title', 'publication_year',
'type', 'main_sector', 'geographical_coverage')
date_hierarchy = 'created'
list_filter = ('type', 'main_sector', 'geographical_coverage', 'publication_year')
search_fields = (
'research_id',
'title',
'organizations',
'researchers'
)
readonly_fields = (
'research_id',
)
fieldsets = [
('', {
# 'classes': ('suit-tab', 'suit-tab-general',),
'fields': [
'research_id',
'title',
'publication_year',
'organizations',
'researchers',
]
}),
('', {
'fields': [
'type',
'main_sector',
'geographical_coverage',
'description',
'report_link',
]
}),
('', {
'fields': [
'recommendations',
]
}),
('', {
'fields': [
'planned_actions',
]
}),
('', {
'fields': [
'taken_actions',
]
})
]
class MapResource(resources.ModelResource):
class Meta:
model = Map
fields = (
'id',
'name',
'description',
'link',
'status',
)
export_order = fields
# @admin.register(Map)
class MapAdmin(ImportExportModelAdmin):
resource_class = MapResource
list_display = ('name', 'description', 'status')
date_hierarchy = 'created'
list_filter = ('status', )
class KnowledgeTrackerResource(resources.ModelResource):
class Meta:
model = KnowledgeTracker
fields = (
'issue_number',
'high_priority',
# 'reported_by__first_name',
'reported_by__last_name',
'issue_category',
'issue_description',
'source',
'source_relevant_link',
'source_number_percentage',
'frequency',
'target_population',
'other_population_considerations',
'answer',
'validated_by_technical_committee',
'dissemination_method',
'relevant_link',
'feedback_status',
'feedback_text',
'feedback_color',
'created'
)
export_order = fields
def custom_titled_filter(title):
class Wrapper(admin.FieldListFilter):
def __new__(cls, *args, **kwargs):
instance = admin.FieldListFilter.create(*args, **kwargs)
instance.title = title
return instance
return Wrapper
@admin.register(KnowledgeTracker)
class KnowledgeTrackerAdmin(ExportActionModelAdmin, VersionAdmin):
resource_class = KnowledgeTrackerResource
form = KnowledgeTrackerForm
list_display = (
'issue_number',
'high_priority',
'feedback',
'reported_organization',
'issue_category',
'issue_description',
'source',
'source_relevant_link',
'source_number_percentage',
'frequency',
'target_population',
'other_population_considerations',
'answer',
'validated_by_technical_committee',
# 'validated_by_moph',
'dissemination_method',
'relevant_link'
)
def feedback(self, obj):
t = render_to_string("django_tables2/feedback_column.html", {'id': str(obj.id), 'feedback_color': str(obj.feedback_color)})
return mark_safe(t)
feedback.allow_tags = True
date_hierarchy = 'created'
list_filter = (
'high_priority',
('reported_by__last_name', custom_titled_filter('Reported By')),
'issue_category',
'source',
'target_population',
'other_population_considerations',
'validated_by_technical_committee',
# 'validated_by_moph',
# DisseminationMethodFilter
)
suit_list_filter_horizontal = (
'high_priority',
'reported_by__last_name',
'issue_category',
'source',
'target_population',
'other_population_considerations',
'validated_by_technical_committee',
# 'validated_by_moph',
# DisseminationMethodFilter
)
search_fields = (
'issue_number',
'high_priority',
# 'reported_by',
'issue_category',
'issue_description',
# 'frequency',
'source',
'source_relevant_link',
'target_population',
'other_population_considerations',
'answer',
# 'validated_by_technical_committee',
# 'validated_by_moph',
'dissemination_method',
'relevant_link',
)
readonly_fields = (
'issue_number',
)
list_editable = [
'frequency',
]
fieldsets = [
('Issue details', {
'fields': [
'issue_number',
'high_priority',
# 'reported_by',
'issue_category',
'issue_description',
'source',
'source_relevant_link',
'source_number_percentage',
'frequency',
'target_population',
'other_population_considerations',
]
}),
('Response', {
'fields': [
'answer',
'validated_by_technical_committee',
# 'validated_by_moph',
'dissemination_method',
'relevant_link',
]
})
]
def has_import_permission(self, request, obj=None):
if request.user.is_superuser:
return True
return False
def has_delete_permission(self, request, obj=None):
if request.user.is_superuser:
return True
return False
# def has_change_permission(self, request, obj=None):
# if has_group(request.user, 'EDITOR'):
# if obj and obj.created_by == request.user:
# return True
# return False
# return True
def get_readonly_fields(self, request, obj=None):
fields = [
'issue_number',
'high_priority',
'reported_by',
'issue_category',
'issue_description',
'source',
'source_relevant_link',
'source_number_percentage',
'target_population',
'other_population_considerations',
'answer',
'validated_by_technical_committee',
# 'validated_by_moph',
'dissemination_method',
'relevant_link',
]
if has_group(request.user, 'ADMIN'):
fields = [
'issue_number',
]
if has_group(request.user, 'EDITOR'):
if obj:
if obj.created_by == request.user:
fields = [
'issue_number',
'answer',
'validated_by_technical_committee',
# 'validated_by_moph',
'dissemination_method',
'relevant_link',
]
else:
fields = [
'issue_number',
'high_priority',
'reported_by',
'issue_category',
'issue_description',
'source',
'source_relevant_link',
'source_number_percentage',
'target_population',
'other_population_considerations',
'answer',
'validated_by_technical_committee',
# 'validated_by_moph',
'dissemination_method',
'relevant_link',
]
else:
fields = [
'issue_number',
'answer',
'validated_by_technical_committee',
# 'validated_by_moph',
'dissemination_method',
'relevant_link',
]
if has_group(request.user, 'VIEW'):
if obj:
fields = [
'issue_number',
'high_priority',
'reported_by',
'issue_category',
'issue_description',
'source',
'source_relevant_link',
'source_number_percentage',
'target_population',
'other_population_considerations',
'answer',
'validated_by_technical_committee',
# 'validated_by_moph',
'dissemination_method',
'relevant_link',
]
else:
fields = [
'issue_number',
'answer',
'validated_by_technical_committee',
# 'validated_by_moph',
'dissemination_method',
'relevant_link',
]
return fields
def save_model(self, request, obj, form, change):
if not change:
obj.created_by = request.user
obj.reported_by = request.user
else:
obj.modified_by = request.user
super(KnowledgeTrackerAdmin, self).save_model(request, obj, form, change)
# def get_queryset(self, request):
# if has_group(request.user, 'EDITOR'):
# return KnowledgeTracker.objects.filter(created_by=request.user)
# return KnowledgeTracker.objects.all()
|
the-stack_0_23863 | from django.db import models
from django.contrib.auth import get_user_model
class Friend(models.Model):
"""This is a model to build relationships between users"""
# the user doing the following
user_from = models.ForeignKey(
get_user_model(),
related_name='rel_from_set',
on_delete=models.CASCADE)
# the user being followed
user_to = models.ForeignKey(
get_user_model(),
related_name='rel_to_set',
on_delete=models.CASCADE)
    # time stamp recorded when the relationship is created
    created_at = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-created_at',)
unique_together = ('user_from', 'user_to')
def __str__(self):
return '{} follows {}'.format(
self.user_from.username,
self.user_to.username)
# adds following field to user dynamically
get_user_model().add_to_class('following', models.ManyToManyField(
'self', through=Friend,
related_name='followers',
symmetrical=False
))
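# Illustrative sketch (hypothetical helper, not part of the original app):
# with a custom `through` model, relationship rows are created on Friend
# directly rather than via following.add().
def _example_follow(alice, bob):
    """Make `alice` follow `bob` and read the relation back (sketch only)."""
    Friend.objects.create(user_from=alice, user_to=bob)
    assert bob in alice.following.all()     # forward accessor defined above
    assert alice in bob.followers.all()     # reverse accessor via related_name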
|
the-stack_0_23864 | # Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
import json
import os
import shutil
import tempfile
import unittest
from extension.src.Constants import Constants
from extension.tests.helpers.RuntimeComposer import RuntimeComposer
from extension.tests.helpers.VirtualTerminal import VirtualTerminal
class TestJsonFileHandler(unittest.TestCase):
def setUp(self):
VirtualTerminal().print_lowlight("\n----------------- setup test runner -----------------")
runtime = RuntimeComposer()
self.json_file_handler = runtime.json_file_handler
def tearDown(self):
VirtualTerminal().print_lowlight("\n----------------- tear down test runner -----------------")
def mock_json_dump_with_exception(self):
raise Exception
def test_get_json_file_content_success(self):
file = Constants.EXT_STATE_FILE
dir_path = os.path.join(os.path.pardir, "tests", "helpers")
json_content = self.json_file_handler.get_json_file_content(file, dir_path, raise_if_not_found=True)
self.assertTrue(json_content is not None)
def test_get_json_file_content_failure(self):
file = Constants.EXT_STATE_FILE
dir_path = os.path.join(os.path.pardir, "tests", "helper")
self.assertRaises(Exception, self.json_file_handler.get_json_file_content, file, dir_path, raise_if_not_found=True)
def test_create_file_success(self):
# Create a temporary directory
test_dir = tempfile.mkdtemp()
file = "test.json"
content = {'testKey1': 'testVal1',
'testKey2': {'testsubKey1': 'testsubVal1'},
'testKey3': [{'testsubKey2': 'testsubVal2'}]}
# create a file
self.json_file_handler.write_to_json_file(test_dir, file, content)
self.assertTrue(os.path.exists(os.path.join(test_dir, "test.json")))
json_content = self.json_file_handler.get_json_file_content(file, test_dir, raise_if_not_found=False)
self.assertTrue('testKey1' in json_content)
# Remove the directory after the test
shutil.rmtree(test_dir)
def test_create_file_failure(self):
# Create a temporary directory
test_dir = tempfile.mkdtemp()
file = "test.json"
content = {'testKey1': 'testVal1',
'testKey2': {'testsubKey1': 'testsubVal1'},
'testKey3': [{'testsubKey2': 'testsubVal2'}]}
self.assertRaises(Exception, self.json_file_handler.write_to_json_file, "test_dir", file, content)
json_dump_backup = json.dump
json.dump = self.mock_json_dump_with_exception
self.assertRaises(Exception, self.json_file_handler.write_to_json_file, test_dir, file, content)
json.dump = json_dump_backup
# Remove the directory after the test
shutil.rmtree(test_dir)
def test_get_json_config_value_safely(self):
content = {'testKey1': 'testVal1',
'testKey2': {'testsubKey1': 'testsubVal1'},
'testKey3': [{'testsubKey2': 'testsubVal2'}]}
self.assertTrue(self.json_file_handler.get_json_config_value_safely(None, 'testsubKey1', 'testKey2', raise_if_not_found=True) is None)
self.assertEqual(self.json_file_handler.get_json_config_value_safely(content, 'testsubKey1', 'testKey2', raise_if_not_found=True), 'testsubVal1')
self.assertRaises(Exception, self.json_file_handler.get_json_config_value_safely, content, 'testsubKey1', 'testKey3', raise_if_not_found=True)
self.assertRaises(Exception, self.json_file_handler.get_json_config_value_safely, content, 'testsubKey2', 'testKey3', raise_if_not_found=True)
self.assertTrue(self.json_file_handler.get_json_config_value_safely(content, 'testsubKey2', 'testKey3', raise_if_not_found=False) is None)
self.assertRaises(Exception, self.json_file_handler.get_json_config_value_safely, content, 'testKey1', None, raise_if_not_found=True)
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(TestJsonFileHandler)
unittest.TextTestRunner(verbosity=2).run(SUITE)
|
the-stack_0_23865 | #!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from __future__ import print_function
import os
import re
import random
from avocado import fail_on
from apricot import TestWithServers
from daos_racer_utils import DaosRacerCommand
from command_utils import CommandFailure
from general_utils import check_file_exists, get_host_data, get_log_file
class ZeroConfigTest(TestWithServers):
"""Test class for zero-config tests.
Test Class Description:
    Test to verify that a client application using libdaos can access a
    running DAOS system with and without special environment variable
    definitions.
:avocado: recursive
"""
def setUp(self):
"""Set up for zero-config test."""
self.setup_start_servers = False
super(ZeroConfigTest, self).setUp()
def get_port_cnt(self, hosts, dev, port_counter):
"""Get the port count info for device names specified.
Args:
hosts (list): list of hosts
dev (str): device to get counter information for
port_counter (str): port counter to get information from
Returns:
dict: a dictionary of data values for each NodeSet key
"""
b_path = "/sys/class/infiniband/{}".format(dev)
file = os.path.join(b_path, "ports/1/counters", port_counter)
        # Check if the counter file exists on the host
check_result = check_file_exists(hosts, file)
if not check_result[0]:
self.fail("{}: {} not found".format(check_result[1], file))
cmd = "cat {}".format(file)
text = "port_counter"
error = "Error obtaining {} info".format(port_counter)
return get_host_data(hosts, cmd, text, error, 20)
def get_log_info(self, hosts, dev, env_state, log_file):
"""Get information from daos.log file to verify device used.
Args:
hosts (list): list of hosts
dev (str): device to get counter information for
env_state (bool): set state for OFI_INTERFACE env variable
log_file (str): log file to verify
Returns:
bool: status of whether correct device was used.
"""
# anticipate log switch
cmd = "if [ -f {0}.old ]; then head -50 {0}.old; else head -50 {0};" \
"fi".format(log_file)
err = "Error getting log data."
pattern = r"Using\s+client\s+provided\s+OFI_INTERFACE:\s+{}".format(dev)
detected = 0
for output in get_host_data(hosts, cmd, log_file, err).values():
detected = len(re.findall(pattern, output))
self.log.info(
"Found %s instances of client setting up OFI_INTERFACE=%s",
detected, dev)
# Verify
status = True
if env_state and detected != 1:
status = False
elif not env_state and detected == 1:
status = False
return status
@fail_on(CommandFailure)
def verify_client_run(self, exp_iface, env):
"""Verify the interface assigned by running a libdaos client.
Args:
exp_iface (str): expected interface to check.
env (bool): add OFI_INTERFACE variable to exported variables of
client command.
Returns:
bool: returns status
"""
hfi_map = {"ib0": "hfi1_0", "ib1": "hfi1_1"}
# Get counter values for hfi devices before and after
cnt_before = self.get_port_cnt(
self.hostlist_clients, hfi_map[exp_iface], "port_rcv_data")
# get the dmg config file for daos_racer
dmg = self.get_dmg_command()
# Let's run daos_racer as a client
daos_racer = DaosRacerCommand(self.bin,
self.hostlist_clients[0], dmg)
daos_racer.get_params(self)
# Update env_name list to add OFI_INTERFACE if needed.
if env:
daos_racer.update_env_names(["OFI_INTERFACE"])
# Setup the environment and logfile
logf = "daos_racer_{}_{}.log".format(exp_iface, env)
# Add FI_LOG_LEVEL to get more info on device issues
racer_env = daos_racer.get_environment(self.server_managers[0], logf)
racer_env["FI_LOG_LEVEL"] = "info"
racer_env["D_LOG_MASK"] = "INFO,object=ERR,placement=ERR"
daos_racer.set_environment(racer_env)
# Run client
daos_racer.run()
# Verify output and port count to check what iface CaRT init with.
cnt_after = self.get_port_cnt(
self.hostlist_clients, hfi_map[exp_iface], "port_rcv_data")
diff = 0
for cnt_b, cnt_a in zip(cnt_before.values(), cnt_after.values()):
diff = int(cnt_a) - int(cnt_b)
self.log.info("Port [%s] count difference: %s", exp_iface, diff)
# Read daos.log to verify device used and prevent false positives
self.assertTrue(
self.get_log_info(
self.hostlist_clients, exp_iface, env, get_log_file(logf)))
# If we don't see data going through the device, fail
        if diff <= 0:
            self.log.info("No traffic seen through device: %s", exp_iface)
            return False
        return True
def test_env_set_unset(self):
"""JIRA ID: DAOS-4880.
Test Description:
Test starting a daos_server process on 2 different numa
nodes and verify that client can start when OFI_INTERFACE is set
or unset. The test expects that the server will have two interfaces
available: hfi_0 and hfi_1.
:avocado: tags=all,daily_regression,hw,small,zero_config,env_set
"""
env_state = self.params.get("env_state", '/run/zero_config/*')
dev_info = {"ib0": 0, "ib1": 1}
        exp_iface = random.choice(list(dev_info.keys()))
# Configure the daos server
self.add_server_manager()
self.configure_manager(
"server",
self.server_managers[0],
self.hostlist_servers,
self.hostfile_servers_slots,
self.access_points)
self.assertTrue(
self.server_managers[0].set_config_value(
"fabric_iface", exp_iface),
"Error updating daos_server 'fabric_iface' config opt")
self.assertTrue(
self.server_managers[0].set_config_value(
"pinned_numa_node", dev_info[exp_iface]),
"Error updating daos_server 'pinned_numa_node' config opt")
# Start the daos server
self.start_server_managers()
# Verify
err = []
if not self.verify_client_run(exp_iface, env_state):
err.append("Failed run with expected dev: {}".format(exp_iface))
self.assertEqual(len(err), 0, "{}".format("\n".join(err)))
|
the-stack_0_23870 | # Copyright (c) 2014 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from networking_odl._i18n import _
odl_opts = [
cfg.StrOpt('url',
help=_("HTTP URL of OpenDaylight REST interface.")),
cfg.StrOpt('username',
help=_("HTTP username for authentication.")),
cfg.StrOpt('password', secret=True,
help=_("HTTP password for authentication.")),
cfg.IntOpt('timeout', default=10,
help=_("HTTP timeout in seconds.")),
cfg.IntOpt('session_timeout', default=30,
help=_("Tomcat session timeout in minutes.")),
cfg.IntOpt('sync_timeout', default=10,
help=_("(V2 driver) Sync thread timeout in seconds.")),
cfg.IntOpt('retry_count', default=5,
help=_("(V2 driver) Number of times to retry a row "
"before failing.")),
cfg.IntOpt('maintenance_interval', default=300,
help=_("(V2 driver) Journal maintenance operations interval "
"in seconds.")),
cfg.IntOpt('completed_rows_retention', default=0,
help=_("(V2 driver) Time to keep completed rows (in seconds)."
"For performance reasons it's not recommended to "
"change this from the default value (0) which "
"indicates completed rows aren't kept."
"This value will be checked every maintenance_interval "
"by the cleanup thread. To keep completed rows "
"indefinitely, set the value to -1")),
cfg.BoolOpt('enable_lightweight_testing',
default=False,
help=_('Test without real ODL.')),
cfg.StrOpt('port_binding_controller',
default='pseudo-agentdb-binding',
help=_('Name of the controller to be used for port binding.')),
    cfg.IntOpt('processing_timeout', default=100,
help=_("(V2 driver) Time in seconds to wait before a "
"processing row is marked back to pending.")),
cfg.StrOpt('odl_hostconf_uri',
help=_("Path for ODL host configuration REST interface"),
default="/restconf/operational/neutron:neutron/hostconfigs"),
cfg.IntOpt('restconf_poll_interval', default=30,
help=_("Poll interval in seconds for getting ODL hostconfig")),
cfg.BoolOpt('enable_websocket_pseudo_agentdb', default=False,
help=_('Enable websocket for pseudo-agent-port-binding.')),
cfg.IntOpt('odl_features_retry_interval', default=5,
help=_("Wait this many seconds before retrying the odl features"
" fetch")),
cfg.ListOpt('odl_features', item_type=str,
help='A list of features supported by ODL'),
cfg.BoolOpt('enable_dhcp_service', default=False,
help=_('Enables the networking-odl driver to supply special'
' neutron ports of "dhcp" type to OpenDaylight'
' Controller for its use in providing DHCP Service.')),
]
cfg.CONF.register_opts(odl_opts, "ml2_odl")
def list_opts():
return [('ml2_odl', odl_opts)]
|
the-stack_0_23871 | """Home Assistant representation of an UPnP/IGD."""
from __future__ import annotations
import asyncio
from collections.abc import Mapping
from typing import Any
from urllib.parse import urlparse
from async_upnp_client import UpnpDevice, UpnpFactory
from async_upnp_client.aiohttp import AiohttpSessionRequester
from async_upnp_client.exceptions import UpnpError
from async_upnp_client.profiles.igd import IgdDevice
from homeassistant.components import ssdp
from homeassistant.components.ssdp import SsdpChange
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
import homeassistant.util.dt as dt_util
from .const import (
BYTES_RECEIVED,
BYTES_SENT,
LOGGER as _LOGGER,
PACKETS_RECEIVED,
PACKETS_SENT,
ROUTER_IP,
ROUTER_UPTIME,
TIMESTAMP,
WAN_STATUS,
)
class Device:
"""Home Assistant representation of a UPnP/IGD device."""
def __init__(self, hass: HomeAssistant, igd_device: IgdDevice) -> None:
"""Initialize UPnP/IGD device."""
self.hass = hass
self._igd_device = igd_device
self.coordinator: DataUpdateCoordinator = None
@classmethod
async def async_create_upnp_device(
cls, hass: HomeAssistant, ssdp_location: str
) -> UpnpDevice:
"""Create UPnP device."""
# Build async_upnp_client requester.
session = async_get_clientsession(hass)
requester = AiohttpSessionRequester(session, True, 20)
# Create async_upnp_client device.
factory = UpnpFactory(requester, disable_state_variable_validation=True)
return await factory.async_create_device(ssdp_location)
@classmethod
async def async_create_device(
cls, hass: HomeAssistant, ssdp_location: str
) -> Device:
"""Create UPnP/IGD device."""
upnp_device = await Device.async_create_upnp_device(hass, ssdp_location)
# Create profile wrapper.
igd_device = IgdDevice(upnp_device, None)
device = cls(hass, igd_device)
# Register SSDP callback for updates.
usn = f"{upnp_device.udn}::{upnp_device.device_type}"
await ssdp.async_register_callback(
hass, device.async_ssdp_callback, {ssdp.ATTR_SSDP_USN: usn}
)
return device
async def async_ssdp_callback(
self, headers: Mapping[str, Any], change: SsdpChange
) -> None:
"""SSDP callback, update if needed."""
if change != SsdpChange.UPDATE or ssdp.ATTR_SSDP_LOCATION not in headers:
return
location = headers[ssdp.ATTR_SSDP_LOCATION]
device = self._igd_device.device
if location == device.device_url:
return
        new_upnp_device = await Device.async_create_upnp_device(self.hass, location)
device.reinit(new_upnp_device)
@property
def udn(self) -> str:
"""Get the UDN."""
return self._igd_device.udn
@property
def name(self) -> str:
"""Get the name."""
return self._igd_device.name
@property
def manufacturer(self) -> str:
"""Get the manufacturer."""
return self._igd_device.manufacturer
@property
def model_name(self) -> str:
"""Get the model name."""
return self._igd_device.model_name
@property
def device_type(self) -> str:
"""Get the device type."""
return self._igd_device.device_type
@property
def usn(self) -> str:
"""Get the USN."""
return f"{self.udn}::{self.device_type}"
@property
def unique_id(self) -> str:
"""Get the unique id."""
return self.usn
@property
def hostname(self) -> str:
"""Get the hostname."""
url = self._igd_device.device.device_url
parsed = urlparse(url)
return parsed.hostname
def __str__(self) -> str:
"""Get string representation."""
return f"IGD Device: {self.name}/{self.udn}::{self.device_type}"
async def async_get_traffic_data(self) -> Mapping[str, Any]:
"""
Get all traffic data in one go.
Traffic data consists of:
- total bytes sent
- total bytes received
- total packets sent
        - total packets received
Data is timestamped.
"""
_LOGGER.debug("Getting traffic statistics from device: %s", self)
values = await asyncio.gather(
self._igd_device.async_get_total_bytes_received(),
self._igd_device.async_get_total_bytes_sent(),
self._igd_device.async_get_total_packets_received(),
self._igd_device.async_get_total_packets_sent(),
)
return {
TIMESTAMP: dt_util.utcnow(),
BYTES_RECEIVED: values[0],
BYTES_SENT: values[1],
PACKETS_RECEIVED: values[2],
PACKETS_SENT: values[3],
}
async def async_get_status(self) -> Mapping[str, Any]:
"""Get connection status, uptime, and external IP."""
_LOGGER.debug("Getting status for device: %s", self)
values = await asyncio.gather(
self._igd_device.async_get_status_info(),
self._igd_device.async_get_external_ip_address(),
return_exceptions=True,
)
result = []
for idx, value in enumerate(values):
if isinstance(value, UpnpError):
# Not all routers support some of these items although based
# on defined standard they should.
_LOGGER.debug(
"Exception occurred while trying to get status %s for device %s: %s",
"status" if idx == 1 else "external IP address",
self,
str(value),
)
result.append(None)
continue
if isinstance(value, Exception):
raise value
result.append(value)
return {
WAN_STATUS: result[0][0] if result[0] is not None else None,
ROUTER_UPTIME: result[0][2] if result[0] is not None else None,
ROUTER_IP: result[1],
}
|
the-stack_0_23873 | """A Cox process model for spatial analysis
(Cox, 1955; Miller et al., 2014).
The data set is a N x V matrix. There are N NBA players, X =
{(x_1, ..., x_N)}, where each x_n has a set of V counts. x_{n, v} is
the number of attempted basketball shots for the nth NBA player at
location v.
We model a latent intensity function for each data point. Let K be the
N x V x V covariance matrix applied to the data set X with fixed
kernel hyperparameters, where a slice K_n is the V x V covariance
matrix over counts for a data point x_n.
For n = 1, ..., N,
p(f_n) = N(f_n | 0, K_n),
p(x_n | f_n) = \prod_{v=1}^V p(x_{n,v} | f_{n,v}),
where p(x_{n,v} | f_{n, v}) = Poisson(x_{n,v} | exp(f_{n,v})).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import MultivariateNormalTriL, Normal, Poisson
from edward.util import rbf
from scipy.stats import multivariate_normal, poisson
tf.flags.DEFINE_integer("N", default=308, help="Number of NBA players.")
tf.flags.DEFINE_integer("V", default=2, help="Number of shot locations.")
FLAGS = tf.flags.FLAGS
def build_toy_dataset(N, V):
"""A simulator mimicking the data set from 2015-2016 NBA season with
308 NBA players and ~150,000 shots."""
L = np.tril(np.random.normal(2.5, 0.1, size=[V, V]))
K = np.matmul(L, L.T)
x = np.zeros([N, V])
for n in range(N):
f_n = multivariate_normal.rvs(cov=K, size=1)
for v in range(V):
x[n, v] = poisson.rvs(mu=np.exp(f_n[v]), size=1)
return x
def main(_):
ed.set_seed(42)
# DATA
x_data = build_toy_dataset(FLAGS.N, FLAGS.V)
# MODEL
x_ph = tf.placeholder(tf.float32, [FLAGS.N, FLAGS.V])
# Form (N, V, V) covariance, one matrix per data point.
K = tf.stack([rbf(tf.reshape(xn, [FLAGS.V, 1])) + tf.diag([1e-6, 1e-6])
for xn in tf.unstack(x_ph)])
f = MultivariateNormalTriL(loc=tf.zeros([FLAGS.N, FLAGS.V]),
scale_tril=tf.cholesky(K))
x = Poisson(rate=tf.exp(f))
# INFERENCE
qf = Normal(
loc=tf.get_variable("qf/loc", [FLAGS.N, FLAGS.V]),
scale=tf.nn.softplus(tf.get_variable("qf/scale", [FLAGS.N, FLAGS.V])))
inference = ed.KLqp({f: qf}, data={x: x_data, x_ph: x_data})
inference.run(n_iter=5000)
if __name__ == "__main__":
tf.app.run()
|
the-stack_0_23876 | """
author: Ye Junxian
time: 11/14/2016
link: https://github.com/un-knight/machine-learning-algorithm
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib.animation import FuncAnimation
from func.read_from_txt import read_from_txt
from mpl_toolkits.mplot3d import Axes3D
def plot_dataset_scatter(y, x):
plt.scatter(x, y, marker='x')
plt.xlim(4, 24)
plt.xticks(np.linspace(4, 24, 10, endpoint=False))
plt.xlabel('Population of city in 10,000s')
plt.ylabel('Profit in 10,000s')
plt.show()
def plot_result(y, x, theta_history=None, gif_flag=False):
fig, ax = plt.subplots()
fig.set_tight_layout(True)
fig.set_size_inches((8, 6))
save_dpi = 80
plt.scatter(x, y, marker='x')
plt.xlim(4, 24)
plt.xticks(np.linspace(4, 24, 10, endpoint=False))
ax.set_xlabel('Population of city in 10,000s')
ax.set_ylabel('Profit in 10,000s')
if theta_history and gif_flag:
theta_1, theta_0 = theta_history[0]
line, = ax.plot(x, theta_0 + theta_1 * x, 'r-', linewidth=2.0)
def update(frame_i):
theta_1i, theta_0i = theta_history[frame_i * 2]
line.set_ydata(theta_0i + theta_1i * x)
ax.set_title('Fit at iteration {0}'.format(frame_i * 2))
return [line]
anim = FuncAnimation(fig, update, frames=range(len(theta_history) // 2), interval=200)
anim.save('regression_process.gif', dpi=save_dpi, writer='imagemagick')
else:
ax.set_title('Fit at iteration {0}'.format(len(theta_history)-1))
ax.plot(x, theta_history[-1][1] + theta_history[-1][0] * x, 'r-', linewidth=2.0)
fig.savefig('result.png', dpi=save_dpi)
plt.show()
def plot_cost_3d(y, x, costfunc, theta_history=None):
N = 500
theta_1s = np.linspace(-1.0, 4.0, N)
theta_0s = np.linspace(-10.0, 10.0, N)
cost = np.zeros((N, N))
for i in range(N):
for j in range(N):
cost[i, j] = costfunc(y, x,
theta_1s[i],
theta_0s[j])
    # Plot the 3D cost surface
fig = plt.figure()
fig.set_tight_layout(True)
ax1 = fig.add_subplot(1, 2, 1, projection='3d')
ax1.set_xlabel('theta_1')
ax1.set_ylabel('theta_0')
ax1.set_title('Surface')
theta_1s_grid, theta_0s_grid = np.meshgrid(theta_1s, theta_0s)
surf = ax1.plot_surface(theta_1s_grid, theta_0s_grid, cost, cmap=cm.coolwarm)
    # Plot the contour map
ax2 = fig.add_subplot(1, 2, 2)
ax2.contour(theta_1s_grid, theta_0s_grid, cost)
ax2.set_xlabel('theta_1')
ax2.set_ylabel('theta_0')
plt.plot(theta_history[-1][0], theta_history[-1][1], 'rx')
plt.show()
def calc_cost(y, x, theta_1, theta_0):
"""
y = theta_0 + theta_1 * x
"""
h = theta_1 * x + theta_0
d = h - y
cost = np.dot(d.T, d) / (2*x.shape[0])
return cost.flat[0]
def gradient_descent(y, x, iterations, learning_rate=0.01):
m = x.shape[0]
theta_1, theta_0 = 0, 0
yield theta_1, theta_0, calc_cost(y, x, theta_1, theta_0)
    # Iterative training loop
for i in range(iterations):
h = theta_0 + theta_1 * x
d = h - y
theta_0 -= learning_rate * d.sum() / m
theta_1 -= learning_rate * (d * x).sum() / m
yield theta_1, theta_0, calc_cost(y, x, theta_1, theta_0)
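# The loop above implements batch gradient descent on the half mean-squared
# error J(theta_0, theta_1) = 1/(2m) * sum_i (theta_0 + theta_1*x_i - y_i)**2,
# using the (effectively simultaneous) updates
#   theta_0 <- theta_0 - alpha * (1/m) * sum_i (h_i - y_i)
#   theta_1 <- theta_1 - alpha * (1/m) * sum_i (h_i - y_i) * x_i
# where h_i = theta_0 + theta_1*x_i is computed once per iteration.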
def main():
    # Constants
    learning_rate = 0.01
    iterations = 1500  # number of gradient descent iterations
    n = 500  # number of generated samples (unused here; data is read from file)
    anim = True  # whether to animate the fitting process as a GIF
x, y = read_from_txt('ex1data1.txt')
    # Plot the raw dataset as a scatter diagram
# plot_dataset_scatter(y, x)
history = list(gradient_descent(y, x, iterations, learning_rate))
theta_history = [(theta_1, theta_0) for theta_1, theta_0, _ in history]
print('theta_1: {0}, theta_0: {1}, cost: {2}'.format(history[-1][0], history[-1][1], history[-1][2]))
plot_result(y, x, theta_history, anim)
plot_cost_3d(y, x, calc_cost, theta_history)
if __name__ == '__main__':
main()
|
the-stack_0_23877 | """Train family of v3 LSTM networks with stacked LSTM units"""
import os
from speval import speval
from lstm_ee.args import join_dicts
from lstm_ee.consts import ROOT_OUTDIR
from lstm_ee.presets import PRESETS_TRAIN
from lstm_ee.train import create_and_train_model
from lstm_ee.utils import parse_concurrency_cmdargs, setup_logging
config = join_dicts(
PRESETS_TRAIN['numu_v3'],
{
# Config:
'batch_size' : 1024,
#'vars_input_slice',
#'vars_input_png2d',
#'vars_input_png3d',
#'vars_target_total',
#'vars_target_primary',
'dataset' :
'numu/prod4/fd_fhc/dataset_lstm_ee_fd_fhc_nonswap_std_cut.csv.xz',
'early_stop' : {
'name' : 'standard',
'kwargs' : {
'monitor' : 'val_loss',
'min_delta' : 0,
'patience' : 40,
},
},
'epochs' : 200,
'loss' : 'mean_absolute_percentage_error',
'max_prongs' : None,
'model' : {
'name' : 'lstm_v3_stack',
'kwargs' : {
'batchnorm' : True,
'layers_pre' : [],
'layers_post' : [],
'lstm3d_spec' : [ (32, 'forward') ],
'lstm2d_spec' : [ (32, 'forward') ],
'n_resblocks' : 0,
},
},
'noise' : None,
'optimizer' : {
'name' : 'RMSprop',
'kwargs' : { 'lr' : 0.001 },
},
'prong_sorters' : None,
'regularizer' : {
'name' : 'l1',
'kwargs' : { 'l' : 0.001 },
},
'schedule' : {
'name' : 'standard',
'kwargs' : {
'monitor' : 'val_loss',
'factor' : 0.5,
'patience' : 5,
'cooldown' : 0
},
},
'seed' : 1337,
'steps_per_epoch' : 250,
'test_size' : 200000,
'weights' : {
'name' : 'flat',
'kwargs' : { 'bins' : 50, 'range' : (0, 5) },
},
# Args:
'vars_mod_png2d' : None,
'vars_mod_png3d' : None,
'vars_mod_slice' : None,
'outdir' : 'numu/prod4/initial_studies/stack_of_lstms/',
}
)
parse_concurrency_cmdargs(config)
logger = setup_logging(
log_file = os.path.join(ROOT_OUTDIR, config['outdir'], "train.log")
)
search_space = [ {} ]
search_space += [
{
'model' : {
'kwargs' : { 'lstm3d_spec' : [ (32, 'forward'), ] * N, }
}
} for N in range(2, 4)
]
search_space += [
{
'model' : {
'kwargs' : { 'lstm3d_spec' : [ (32, 'backward'), ] * N, }
}
} for N in range(2, 4)
]
search_space += [
{
'model' : {
'kwargs' : { 'lstm3d_spec' : [ (32, 'bidirectional'), ] * N, }
}
} for N in range(2, 4)
]
speval(
lambda x : create_and_train_model(**config, extra_kwargs = x),
search_space,
os.path.join(ROOT_OUTDIR, config['outdir'], "trials.db"),
timeout = 3 * 60 * 60
)
|
the-stack_0_23878 | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Hyperparameter values."""
import json
import numbers
import six
def _cast_to_type_if_compatible(name, param_type, value):
"""Cast hparam to the provided type, if compatible.
Args:
name: Name of the hparam to be cast.
param_type: The type of the hparam.
value: The value to be cast, if compatible.
Returns:
The result of casting `value` to `param_type`.
Raises:
ValueError: If the type of `value` is not compatible with param_type.
* If `param_type` is a string type, but `value` is not.
* If `param_type` is a boolean, but `value` is not, or vice versa.
* If `param_type` is an integer type, but `value` is not.
* If `param_type` is a float type, but `value` is not a numeric type.
"""
fail_msg = ("Could not cast hparam '%s' of type '%s' from value %r" %
(name, param_type, value))
# Some callers use None, for which we can't do any casting/checking. :(
if issubclass(param_type, type(None)):
return value
# Avoid converting a non-string type to a string.
if (issubclass(param_type, (six.string_types, six.binary_type)) and
not isinstance(value, (six.string_types, six.binary_type))):
raise ValueError(fail_msg)
# Avoid converting a number or string type to a boolean or vice versa.
if issubclass(param_type, bool) != isinstance(value, bool):
raise ValueError(fail_msg)
# Avoid converting float to an integer (the reverse is fine).
if (issubclass(param_type, numbers.Integral) and
not isinstance(value, numbers.Integral)):
raise ValueError(fail_msg)
# Avoid converting a non-numeric type to a numeric type.
if (issubclass(param_type, numbers.Number) and
not isinstance(value, numbers.Number)):
raise ValueError(fail_msg)
return param_type(value)
class HParams(object):
"""Class to hold a set of hyperparameters as name-value pairs.
A `HParams` object holds hyperparameters used to build and train a model,
such as the number of hidden units in a neural net layer or the learning rate
to use when training.
You first create a `HParams` object by specifying the names and values of the
hyperparameters.
To make them easily accessible the parameter names are added as direct
attributes of the class. A typical usage is as follows:
```python
# Create a HParams object specifying names and values of the model
# hyperparameters:
hparams = HParams(learning_rate=0.1, num_hidden_units=100)
# The hyperparameter are available as attributes of the HParams object:
hparams.learning_rate ==> 0.1
hparams.num_hidden_units ==> 100
```
Hyperparameters have type, which is inferred from the type of their value
  passed at construction time. The currently supported types are: integer,
float, string, and list of integer, float, or string.
You can override hyperparameter values by calling the
[`parse()`](#HParams.parse) method, passing a string of comma separated
`name=value` pairs. This is intended to make it possible to override
any hyperparameter values from a single command-line flag to which
the user passes 'hyper-param=value' pairs. It avoids having to define
one flag for each hyperparameter.
The syntax expected for each value depends on the type of the parameter.
See `parse()` for a description of the syntax.
Example:
```python
# Define a command line flag to pass name=value pairs.
# For example using argparse:
import argparse
parser = argparse.ArgumentParser(description='Train my model.')
parser.add_argument('--hparams', type=str,
help='Comma separated list of "name=value" pairs.')
args = parser.parse_args()
...
def my_program():
# Create a HParams object specifying the names and values of the
# model hyperparameters:
hparams = tf.HParams(learning_rate=0.1, num_hidden_units=100,
activations=['relu', 'tanh'])
# Override hyperparameters values by parsing the command line
hparams.parse(args.hparams)
# If the user passed `--hparams=learning_rate=0.3` on the command line
# then 'hparams' has the following attributes:
hparams.learning_rate ==> 0.3
hparams.num_hidden_units ==> 100
hparams.activations ==> ['relu', 'tanh']
# If the hyperparameters are in json format use parse_json:
hparams.parse_json('{"learning_rate": 0.3, "activations": "relu"}')
```
"""
_HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks.
def __init__(self, hparam_def=None, model_structure=None, **kwargs):
"""Create an instance of `HParams` from keyword arguments.
The keyword arguments specify name-values pairs for the hyperparameters.
The parameter types are inferred from the type of the values passed.
The parameter names are added as attributes of `HParams` object, so they
can be accessed directly with the dot notation `hparams._name_`.
Example:
```python
# Define 3 hyperparameters: 'learning_rate' is a float parameter,
# 'num_hidden_units' an integer parameter, and 'activation' a string
# parameter.
hparams = tf.HParams(
learning_rate=0.1, num_hidden_units=100, activation='relu')
hparams.activation ==> 'relu'
```
Note that a few names are reserved and cannot be used as hyperparameter
  names. If you use one of the reserved names, the constructor raises a
`ValueError`.
Args:
hparam_def: Serialized hyperparameters, encoded as a hparam_pb2.HParamDef
protocol buffer. If provided, this object is initialized by
deserializing hparam_def. Otherwise **kwargs is used.
model_structure: An instance of ModelStructure, defining the feature
crosses to be used in the Trial.
**kwargs: Key-value pairs where the key is the hyperparameter name and the
value is the value for the parameter.
Raises:
ValueError: If both `hparam_def` and initialization values are provided,
or if one of the arguments is invalid.
"""
del hparam_def
# Register the hyperparameters and their type in _hparam_types.
# This simplifies the implementation of parse().
# _hparam_types maps the parameter name to a tuple (type, bool).
# The type value is the type of the parameter for scalar hyperparameters,
# or the type of the list elements for multidimensional hyperparameters.
# The bool value is True if the value is a list, False otherwise.
self._hparam_types = {}
self._model_structure = model_structure
for name, value in six.iteritems(kwargs):
self.add_hparam(name, value)
def add_hparam(self, name, value):
"""Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid.
"""
# Keys in kwargs are unique, but 'name' could the name of a pre-existing
# attribute of this object. In that case we refuse to use it as a
# hyperparameter name.
if getattr(self, name, None) is not None:
raise ValueError('Hyperparameter name is reserved: %s' % name)
if isinstance(value, (list, tuple)):
if not value:
raise ValueError('Multi-valued hyperparameters cannot be empty: %s' %
name)
self._hparam_types[name] = (type(value[0]), True)
else:
self._hparam_types[name] = (type(value), False)
setattr(self, name, value)
def set_hparam(self, name, value):
"""Set the value of an existing hyperparameter.
This function verifies that the type of the value matches the type of the
existing hyperparameter.
Args:
name: Name of the hyperparameter.
value: New value of the hyperparameter.
Raises:
ValueError: If there is a type mismatch.
"""
param_type, is_list = self._hparam_types[name]
if isinstance(value, list):
if not is_list:
raise ValueError(
'Must not pass a list for single-valued parameter: %s' % name)
setattr(self, name,
[_cast_to_type_if_compatible(name, param_type, v) for v in value])
else:
if is_list:
raise ValueError('Must pass a list for multi-valued parameter: %s.' %
name)
setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))
def override_from_dict(self, values_dict):
"""Override hyperparameter values, parsing new values from a dictionary.
Args:
values_dict: Dictionary of name:value pairs.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values_dict` cannot be parsed.
"""
for name, value in values_dict.items():
self.set_hparam(name, value)
return self
# @deprecation.deprecated(None, 'Use `override_from_dict`.')
def set_from_map(self, values_map):
"""DEPRECATED. Use override_from_dict."""
return self.override_from_dict(values_dict=values_map)
def set_model_structure(self, model_structure):
self._model_structure = model_structure
def get_model_structure(self):
return self._model_structure
def to_json(self, indent=None, separators=None, sort_keys=False):
"""Serializes the hyperparameters into JSON.
Args:
indent: If a non-negative integer, JSON array elements and object members
will be pretty-printed with that indent level. An indent level of 0, or
negative, will only insert newlines. `None` (the default) selects the
most compact representation.
separators: Optional `(item_separator, key_separator)` tuple. Default is
`(', ', ': ')`.
sort_keys: If `True`, the output dictionaries will be sorted by key.
Returns:
A JSON string.
"""
return json.dumps(
self.values(),
indent=indent,
separators=separators,
sort_keys=sort_keys)
def parse_json(self, values_json):
"""Override hyperparameter values, parsing new values from a json object.
Args:
values_json: String containing a json object of name:value pairs.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values_json` cannot be parsed.
"""
values_map = json.loads(values_json)
return self.override_from_dict(values_map)
def values(self):
"""Return the hyperparameter values as a Python dictionary.
Returns:
A dictionary with hyperparameter names as keys. The values are the
hyperparameter values.
"""
return {n: getattr(self, n) for n in self._hparam_types.keys()}
def get(self, key, default=None):
"""Returns the value of `key` if it exists, else `default`."""
if key in self._hparam_types:
# Ensure that default is compatible with the parameter type.
if default is not None:
param_type, is_param_list = self._hparam_types[key]
type_str = 'list<%s>' % param_type if is_param_list else str(param_type)
fail_msg = ("Hparam '%s' of type '%s' is incompatible with "
'default=%s' % (key, type_str, default))
is_default_list = isinstance(default, list)
if is_param_list != is_default_list:
raise ValueError(fail_msg)
try:
if is_default_list:
for value in default:
_cast_to_type_if_compatible(key, param_type, value)
else:
_cast_to_type_if_compatible(key, param_type, default)
except ValueError as e:
raise ValueError('%s. %s' % (fail_msg, e))
return getattr(self, key)
return default
def __contains__(self, key):
return key in self._hparam_types
def __str__(self):
return str(sorted(self.values().items()))
def __repr__(self):
return '%s(%s)' % (type(self).__name__, self.__str__())
@staticmethod
def _get_kind_name(param_type, is_list):
"""Returns the field name given parameter type and is_list.
Args:
param_type: Data type of the hparam.
is_list: Whether this is a list.
Returns:
A string representation of the field name.
Raises:
ValueError: If parameter type is not recognized.
"""
if issubclass(param_type, bool):
# This check must happen before issubclass(param_type, six.integer_types),
# since Python considers bool to be a subclass of int.
typename = 'bool'
elif issubclass(param_type, six.integer_types):
# Setting 'int' and 'long' types to be 'int64' to ensure the type is
# compatible with both Python2 and Python3.
typename = 'int64'
elif issubclass(param_type, (six.string_types, six.binary_type)):
# Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is
# compatible with both Python2 and Python3.
typename = 'bytes'
elif issubclass(param_type, float):
typename = 'float'
else:
raise ValueError('Unsupported parameter type: %s' % str(param_type))
suffix = 'list' if is_list else 'value'
return '_'.join([typename, suffix])
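# Minimal usage sketch (illustrative only; the names and values below are
# assumptions, not taken from any real model configuration).
if __name__ == '__main__':
  hparams = HParams(learning_rate=0.1, num_hidden_units=100)
  hparams.parse_json('{"learning_rate": 0.3}')  # override from a JSON string
  hparams.set_hparam('num_hidden_units', 200)   # type-checked in-place update
  print(hparams.get('learning_rate'))           # -> 0.3
  print(hparams.to_json(sort_keys=True))        # serialize back to JSON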
|
the-stack_0_23879 | from django.views.decorators.http import require_POST
__author__ = 'Alexandre Cloquet'
from django.conf.urls import patterns, include, url
from django.contrib import admin
from .views import OpenEnrollementView, CharacterAttributesView, EnrollementView, EnrollementListView, \
EnrollmentDetailView, \
CommentEnrollmentFormView, EnrollmentDetail, voteUp, voteDown, redirect_to_bnet, tmp
from Portal.views import index as portal_index
from Forum.views import index as forum_index
urlpatterns = patterns('',
url(r'^$', OpenEnrollementView.as_view(), name='enrollement_index'),
url(r'^list_enrollement/$', EnrollementListView.as_view(), name='enrollement_list'),
url(r'^test/$', redirect_to_bnet, name='test_battlenet'),
url(r'^tmp/$', tmp, name='test_battlenet'),
url(r'^detail/(?P<pk>\d+)/$', EnrollmentDetail.as_view(), name='enrollment_detail'),
url(r'^detail/(?P<pk>\d+)/comment/$', require_POST(CommentEnrollmentFormView.as_view()),
name='enrollment_detail_comment'),
url(r'^detail/(?P<pk>\d+)/voteup/$', voteUp, name='voteup'),
url(r'^detail/(?P<pk>\d+)/votedown/$', voteDown, name='votedown'),
url(r'^(?P<id_application>(\d+))/$', EnrollementView.as_view(), name='enrollement_application'),
url(r'^character_attributes$', CharacterAttributesView.as_view(), name='character_attributes'),
)
|
the-stack_0_23881 | import os
import ipdb
import sys
def main(argv):
    # Expects two command-line arguments: argv[0] is the target directory
    # (relative to the current working directory) and argv[1] is the value
    # appended to that directory's To.txt.
CWD = os.getcwd()
# ipdb.set_trace()
cmd = "cd "
cmd+= CWD + '/' + argv[0]
cmd+=" && echo '" + str(argv[1]) + "'>> To.txt"
os.system(cmd) # returns the exit code in unix
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_0_23883 | """The MIT License (MIT)
Copyright (c) 2016 Cahyo Primawidodo
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
import pytest
import struct
import io
from stdf.stdf_reader import Reader
@pytest.fixture()
def rd():
return Reader('/Users/cahyo/Dropbox/programming/python/STDFReader/tests/stdf_test.json')
def test_read_unsigned(rd):
r = struct.pack('<HBBBHI', 7, 0x0B, 0x01, 0x81, 0x8001, 0x80000001)
rd.STDF_IO = io.BytesIO(r)
x = rd.read_record()
assert x == ('T1U', (7, 11, 1),
{'UNSIGNED_1': 0x81,
'UNSIGNED_2': 0x8001,
'UNSIGNED_4': 0x80000001})
assert not rd.read_record()
def test_read_signed(rd):
r = struct.pack('<HBBbhi', 7, 0x0B, 0x02, -0x80, -0x8000, -0x80000000)
rd.STDF_IO = io.BytesIO(r)
x = rd.read_record()
assert x == ('T1I', (7, 11, 2),
{'SIGNED_1': -0x80,
'SIGNED_2': -0x8000,
'SIGNED_4': -0x80000000})
assert not rd.read_record()
def test_read_float(rd):
r = struct.pack('<HBBfd', 12, 0x0B, 0x03, 3/2, 22/7)
rd.STDF_IO = io.BytesIO(r)
x = rd.read_record()
assert x == ('T1F', (12, 11, 3),
{'FLOAT': 3/2,
'DOUBLE': 22/7})
assert not rd.read_record()
def test_read_nibble(rd):
r = struct.pack('<HBBB', 1, 0x0B, 0x04, 0x7D)
rd.STDF_IO = io.BytesIO(r)
x = rd.read_record()
assert x == ('T1N', (1, 11, 4),
{'NIBBLE_1': 0xD,
'NIBBLE_2': 0x7})
assert not rd.read_record()
def test_read_string(rd):
r = struct.pack('<HBBB5sB7sB11s', 26, 0x0B, 0x05, 5, b'hidup', 7, b'adalah\x00', 11, b'perjuangan\x00')
rd.STDF_IO = io.BytesIO(r)
x = rd.read_record()
assert x == ('TCn', (26, 11, 5),
{'STRING_1': b'hidup',
'STRING_2': b'adalah\x00',
'STRING_7': b'perjuangan\x00'})
assert not rd.read_record()
def test_read_seq_of_bytes(rd):
r = struct.pack('HBBB1BB2BB7B', 13, 0x0B, 0x06, 1, 1, 2, 2, 3, 7, 4, 5, 6, 7, 8, 9, 10)
rd.STDF_IO = io.BytesIO(r)
x = rd.read_record()
assert x == ('TBn', (13, 11, 6),
{'BYTE_1': 1,
'BYTE_2': (2, 3),
'BYTE_7': (4, 5, 6, 7, 8, 9, 10)})
assert not rd.read_record() |
the-stack_0_23884 | def is_number_tryexcept(s):
""" Returns True is string is a number. """
try:
int(s)
return True
except ValueError:
return False
def get_valid_k_input(list_length):
while True:
k = input('podaj k: ')
if not is_number_tryexcept(k):
print('podana wartość nie jest liczbą')
continue
k_int = int(k)
if k_int <= 0 or k_int > list_length:
print('podany indeks jest spoza dozwolonego zakresu')
continue
return k_int
def get_kth_element_reversed(lst):
k = get_valid_k_input(len(lst))
return lst[-k]
print(get_kth_element_reversed([1, 2, 3, 4, 5, 6, 7, 8, 9]))
|
the-stack_0_23885 | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for efficientnet_lite_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from backbone import efficientnet_lite_builder
class EfficientnetBuilderTest(tf.test.TestCase):
def _test_model_params(self,
model_name,
input_size,
expected_params,
override_params=None,
features_only=False,
pooled_features_only=False):
images = tf.zeros((1, input_size, input_size, 3), dtype=tf.float32)
efficientnet_lite_builder.build_model(
images,
model_name=model_name,
override_params=override_params,
training=True,
features_only=features_only,
pooled_features_only=pooled_features_only)
num_params = np.sum([np.prod(v.shape) for v in tf.trainable_variables()])
self.assertEqual(num_params, expected_params)
def test_efficientnet_b0(self):
self._test_model_params(
'efficientnet-lite0', 224, expected_params=4652008)
def test_efficientnet_b1(self):
self._test_model_params(
'efficientnet-lite1', 240, expected_params=5416680)
def test_efficientnet_b2(self):
self._test_model_params(
'efficientnet-lite2', 260, expected_params=6092072)
def test_efficientnet_b3(self):
self._test_model_params(
'efficientnet-lite3', 280, expected_params=8197096)
def test_efficientnet_b4(self):
self._test_model_params(
'efficientnet-lite4', 300, expected_params=13006568)
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
|
the-stack_0_23889 | # MQTT Repl for MicroPython by Thorsten von Eicken (c) 2020
#
# Simple repl over MQTT, subscribes to a topic and feeds data received on that topic into the system
# repl. Collects output from the system repl and sends it out to another topic with up to 100ms
# delay in order to accumulate bytes into messages.
#
# Requires asyncio-based MQTT, i.e. mqtt_as
# Inspired by https://github.com/micropython/micropython/blob/master/examples/bluetooth/ble_uart_repl.py
import io, os, _thread
import uasyncio as asyncio

# MicroPython stream-protocol constants used by ioctl() below.
_MP_STREAM_POLL = 3
_MP_STREAM_POLL_RD = 0x0001
class MQTTRepl(io.IOBase):
# Create a repl interface and start publishing repl output using the pub function passed as
# argument. Pub must accept a byte buffer.
def __init__(self):
self.tx_buf = bytearray()
self.rx_len = 0
def input(self, msg):
if self.rx_len == 0:
self.rx_buf = io.BytesIO(msg)
self.rx_len = len(msg)
else:
self.rx_buf.write(msg)
self.rx_len += len(msg)
print("input: rx_len={}".format(self.rx_len), _thread.get_ident())
# Needed for ESP32 & ESP8266.
if hasattr(os, 'dupterm_notify'):
os.dupterm_notify(None)
def read(self, sz=None):
if self.rx_len == 0: return None
        got = self.rx_buf.read(sz)
self.rx_len -= len(got)
print("read: rx_len={}".format(self.rx_len))
return got
def readinto(self, buf):
if self.rx_len == 0: return None
self.rx_len = 0
print("readinto", _thread.get_ident())
return self.rx_buf.readinto(buf)
def ioctl(self, op, arg):
if op == _MP_STREAM_POLL and self.rx_len > 0:
return _MP_STREAM_POLL_RD
return 0
async def sender(self, pub):
print("sender", _thread.get_ident())
while True:
while len(self.tx_buf) > 0:
data = self.tx_buf[0:1024]
self.tx_buf = self.tx_buf[1024:]
await pub(data)
await asyncio.sleep_ms(100)
def write(self, buf):
if buf[:4] == b'TADA': return
self.tx_buf += buf
#print("TADA:write({})->{}".format(len(buf), len(self.tx_buf)))
import board, machine, time
from mqtt_as import MQTTClient, config
import uasyncio as asyncio
print("\n===== esp32 mqttrepl `{}` starting at {} =====\n".format(board.location, time.time()))
TOPIC = 'esp32/' + board.location + '/repl'
REPL_IN = TOPIC + '/in'
REPL_OUT = TOPIC + '/out'
loop = asyncio.get_event_loop()
mqrepl = None
mqclient = None
outages = 0
# ===== asyncio and mqtt callback handlers
# pulse blue LED
async def pulse():
board.blue_led(True)
await asyncio.sleep_ms(100)
board.blue_led(False)
# handle the arrival of an MQTT message
def sub_cb(topic, msg, retained):
topic = str(topic, 'utf-8')
print("MQTT:", (topic, msg))
loop.create_task(pulse())
if topic == REPL_IN:
if mqrepl is not None:
mqrepl.input(msg)
return
async def wifi_cb(state):
board.wifi_led(not state) # Light LED when WiFi down
if state:
print('WiFi connected')
else:
global outages
outages += 1
print('WiFi or broker is down')
async def conn_cb(client):
print('MQTT connected')
await client.subscribe(REPL_IN, 1)
print("Subscribed to", REPL_IN)
# ===== REPL helpers
async def repl_pub(buf):
await mqclient.publish(REPL_OUT, buf, qos=0)
# ===== main loop
async def main():
global mqclient
# get an initial connection
board.blue_led(True)
try:
await mqclient.connect()
except OSError:
print('Connection failed')
return
# Start repl
global mqrepl
mqrepl = MQTTRepl()
loop.create_task(mqrepl.sender(repl_pub))
await asyncio.sleep_ms(10)
os.dupterm(mqrepl, 0)
await asyncio.sleep_ms(10)
# TODO: wait for time sync
# launch tasks
#loop.create_task(query_sensors(client))
#loop.create_task(poll_uarts(client))
# play watchdog
while True:
print("Still running...")
await asyncio.sleep(10)
# Start MQTT (and Wifi)
config['subs_cb'] = sub_cb
config['wifi_coro'] = wifi_cb
config['connect_coro'] = conn_cb
config['keepalive'] = 120
#MQTTClient.DEBUG = True
mqclient = MQTTClient(config)
#import uasyncio, logging
#logging.basicConfig(level=logging.DEBUG)
#uasyncio.set_debug(True)
print("Starting loop...")
try:
loop.run_until_complete(main())
finally: # Prevent LmacRxBlk:1 errors.
mqclient.close()
board.blue_led(True)
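# Host-side usage sketch (illustrative; 'mybox' stands in for board.location):
# drive the remote REPL with the mosquitto CLI clients by publishing
# keystrokes to .../repl/in and watching .../repl/out. Lines must end with a
# carriage return for the REPL to execute them.
#   mosquitto_sub -t esp32/mybox/repl/out &
#   mosquitto_pub -t esp32/mybox/repl/in -m "$(printf 'print(2+2)\r')"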
|
the-stack_0_23891 | #!/usr/bin/python3.5
#
# payout.py is run by a cron job at an interval that is set by the ADMIN
# This script checks the database for any transactions in the last $payoutTime and if found pays them out the $amountToPay
#
# You must setup the settings here to reflect how you want the faucet to work.
#
## FIX-ME
import requests
import json
import mysql.connector
import datetime
import logging
# LOGGING - This establishes the logging function and where to write the log file
logging.basicConfig(format='%(asctime)s %(message)s', filename='/home/ubuntu/.qrl/faucet.log', level=logging.INFO)
# Mark the file the script has been initiated.
logging.info('\n\n#####################################################\nPayout.py Script Initiated\n#####################################################')
# DATABASE settings
host = "localhost" # Where is the database located
user = "qrl" # database user setup during install
passwd = "DATABASE_PASSWORD" # database password (Must have read/write access)
database = "faucet" # Database name setup during install
# QRL Payout settings
payoutTime = 1 # Hours back from NOW() that submissions are paid out. This should align with the cron interval to avoid missed or double payouts.
payNumber = 100 # How many transactions to combine in a single TX. The network allows a MAX of 100 addresses in a TX. (Size limitations)
payees = [] # Empty array for future payees. (Who to pay, based on submitted addresses to the faucet)
payoutList = [] # Empty array for future payouts
fee = 10 # fee in shor; 1 QRL = 10**9 shor
amountToPay = 100 # payout amount in shor; 1 QRL = 10**9 shor
# Grab the time
current_time = datetime.datetime.now()
# SQL Syntax | Dont change these please
sql = "SELECT QRL_ADDR from PAYOUT where DATETIME > DATE_SUB(NOW(), INTERVAL %d HOUR)" % payoutTime
Countsql = "SELECT COUNT(QRL_ADDR) from PAYOUT where DATETIME > DATE_SUB(NOW(), INTERVAL %d HOUR)" % payoutTime
# Get the list of addresses from the local wallet API and parse the response into a usable format (JSON)
def listAddresses():
QRLrequest = requests.get("http://127.0.0.1:5359/api/ListAddresses")
response = QRLrequest.text
logging.info('listAddress Called.\nLocal wallet Address is: %s', response)
listAddressesResp = json.loads(response)
jsonResponse = listAddressesResp
return(jsonResponse)
# Relay a transfer transaction onto the network
def relayTransferTxnBySlave(addresses_to, amounts, fee, master_address):
payload = {'addresses_to': addresses_to, 'amounts': amounts, 'fee': fee, 'master_address': master_address }
logging.info('relayTransferTxnBySlave Called. \nPayload is: %s', payload)
QRLrequest = requests.post("http://127.0.0.1:5359/api/RelayTransferTxnBySlave", json=payload)
response = QRLrequest.text
relayTransferTxnBySlaveResp = json.loads(response)
jsonResponse = relayTransferTxnBySlaveResp
logging.info('TX HASH: %s', json.dumps(jsonResponse['tx']['transaction_hash']))
return(jsonResponse)
# Relay a message onto the network. | Change the message
def relayMessageTxnBySlave(message, fee, master_address):
payload = {'message': message, 'fee': fee, 'master_address': master_address }
logging.info('relayMessageTxnBySlave Called. \nPayload is: %s', payload)
QRLrequest = requests.post("http://127.0.0.1:5359/api/RelayMessageTxnBySlave", json=payload)
response = QRLrequest.text
relayMessageTxnBySlaveResp = json.loads(response)
jsonResponse = relayMessageTxnBySlaveResp
logging.info('TX HASH: %s \nMessage: %s', json.dumps(jsonResponse['tx']['transaction_hash']), json.dumps(jsonResponse['tx']['message']))
return(jsonResponse)
# Message is sent on each payout TX to the network.
def message():
time = str(current_time)
message = 'Another Payout from the Faucet at https://qrl.tips '
message = '"'+message+'"'
return(message)
# The payout address we have on the server
masterAddress = listAddresses()['addresses'][0]
# Database setup | Dont Change
mydb = mysql.connector.connect(
host = host,
user = user,
passwd = passwd,
database = database
)
# SQL stuff
mycursor = mydb.cursor()
cursor = mydb.cursor()
cursor.execute(Countsql)
DBcount = cursor.fetchone()
number_of_rows=DBcount[0]
# Check the database and if we have new addresses process them
if number_of_rows != 0:
logging.info('There is something in the Database, processing row count........ \nWe have Found: \t %s Rows', str(number_of_rows))
if number_of_rows > payNumber:
print("More than "+ str(payNumber) +" addresses found")
        logging.info('We have more Addresses than a TX can fit! Our limit is: %s Addresses and we have %s. Break it up!', payNumber, number_of_rows)
mycursor.execute(sql)
while True:
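            # Page through the pending addresses in chunks of payNumber (the
            # network's per-TX address limit); an empty batch means all rows
            # have been processed.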
batch = mycursor.fetchmany(payNumber)
result = []
for x in batch:
result.append(x)
payees = [val for QRL_ADDR in result for val in QRL_ADDR]
logging.info('Payees List:\n %s', payees)
payoutList = []
for f in payees:
payoutList.append(amountToPay)
amount = payoutList
if not payees:
logging.info('Payees are empty')
else:
logging.info('Send one of many TX!')
tx = relayTransferTxnBySlave(payees, amount, fee, masterAddress)
if not batch:
logging.info('All done send a MESSAGE!')
messageTX = relayMessageTxnBySlave(message(), fee, masterAddress)
logging.info('All TX Paid!')
break
elif number_of_rows <= payNumber:
logging.info('We have enough Addresses for a single TX! You can have %s addresses and we have found %s', payNumber, number_of_rows)
mycursor.execute(sql)
myresult = mycursor.fetchall()
result = []
for x in myresult:
result.append(x)
payees = [val for QRL_ADDR in result for val in QRL_ADDR]
payoutList = []
for f in payees:
payoutList.append(amountToPay)
amount = payoutList
logging.info('Send a TX!')
tx = relayTransferTxnBySlave(payees, amount, fee, masterAddress)
logging.info('Send a MESSAGE!')
messageTX = relayMessageTxnBySlave(message(), fee, masterAddress)
else:
logging.info('No Addresses Found, sleep...')
print("No Addresses to pay: " + str(number_of_rows))
exit() |
the-stack_0_23892 | from sympy import exp, I, Matrix, pi, sqrt, Symbol
from sympy.physics.quantum.qft import QFT, IQFT, RkGate
from sympy.physics.quantum.gate import (ZGate, SwapGate, HadamardGate, CGate,
PhaseGate, TGate)
from sympy.physics.quantum.qubit import Qubit
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.represent import represent
def test_RkGate():
x = Symbol('x')
assert RkGate(1, x).k == x
assert RkGate(1, x).targets == (1,)
assert RkGate(1, 1) == ZGate(1)
assert RkGate(2, 2) == PhaseGate(2)
assert RkGate(3, 3) == TGate(3)
assert represent(
RkGate(0, x), nqubits=1) == Matrix([[1, 0], [0, exp(2*I*pi/2**x)]])
def test_quantum_fourier():
assert QFT(0, 3).decompose() == \
SwapGate(0, 2)*HadamardGate(0)*CGate((0,), PhaseGate(1)) * \
HadamardGate(1)*CGate((0,), TGate(2))*CGate((1,), PhaseGate(2)) * \
HadamardGate(2)
assert IQFT(0, 3).decompose() == \
HadamardGate(2)*CGate((1,), RkGate(2, -2))*CGate((0,), RkGate(2, -3)) * \
HadamardGate(1)*CGate((0,), RkGate(1, -2))*HadamardGate(0)*SwapGate(0, 2)
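    # The full 3-qubit QFT should represent as the 8x8 DFT matrix with entries
    # omega**(i*j)/sqrt(8), where omega = exp(2*pi*I/8).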
assert represent(QFT(0, 3), nqubits=3) == \
Matrix([[exp(2*pi*I/8)**(i*j % 8)/sqrt(8) for i in range(8)] for j in range(8)])
assert QFT(0, 4).decompose() # non-trivial decomposition
assert qapply(QFT(0, 3).decompose()*Qubit(0, 0, 0)).expand() == qapply(
HadamardGate(0)*HadamardGate(1)*HadamardGate(2)*Qubit(0, 0, 0)
).expand()
def test_qft_represent():
c = QFT(0, 3)
a = represent(c, nqubits=3)
b = represent(c.decompose(), nqubits=3)
assert a.evalf(n=10) == b.evalf(n=10)
|
the-stack_0_23894 | # coded by Ryan, currently set up for carol.
import os
import time
from PIL import Image
import shutil
save = 1
# ---select photos---
def select_photos():
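    # Photos appear in a 3-column grid; tap coordinates come from a base
    # offset plus fixed per-column and per-row spacing.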
x_base = 307
x_add = 365
y_base = 420
y_add = 360
for i in range(0, loop):
x = x_base + x_add * (i % 3)
y = y_base + y_add * (int(i / 3))
os.system("adb %s shell input tap %s %s" % (a, x, y))
# adding photos into phone
def push_photos():
print(str(loop) + ' files detected. ')
for i in pics:
i1 = Image.open(local_path + i)
i1.save(local_path + i)
os.system('adb %s push ' % a + local_path + i + ' ' + phone_path)
print('file name: ' + i + ' sent to phone')
time.sleep(1)
    print('all files have been sent... waiting for the system to respond...')
os.system('adb %s shell am broadcast -a android.intent.action.MEDIA_SCANNER_SCAN_FILE -d file:///sdcard/DCIM/Camera'% a)
time.sleep(3)
# --- put text into title and main text---
def text():
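    # Reads the copy file 文案(标题前标注T).txt ("copy, titles marked with T"):
    # the first line starting with 'T' is typed into the title field, every
    # other line is typed into the body.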
f = open('文案(标题前标注T).txt')
check = 0
for line in f:
if check == 0 and line[0] == 'T':
os.system('adb %s shell input tap 311 634'% a)
os.system("adb %s shell am broadcast -a ADB_INPUT_TEXT --es msg \'%s\'" % (a, line.replace(' ', '\ ')[1:]))
check += 1
os.system('adb %s shell input tap 330 824'% a)
else:
os.system('adb %s shell am broadcast -a ADB_INPUT_TEXT --es msg \'%s\' ' % (a, line.replace(' ', '\ ')))
os.system('adb %s shell am broadcast -a ADB_INPUT_CODE --ei code 66'% a)
f.close()
def text1():
os.system("adb %s shell monkey -p com.android.browser -c android.intent.category.LAUNCHER 1" % a)
time.sleep(1)
os.system('adb %s shell input tap 130 1098 ' % a)
time.sleep(0.5)
    # switch back, enter title
os.system("adb %s shell monkey -p com.xingin.xhs -c android.intent.category.LAUNCHER 1" % a)
time.sleep(1)
os.system('adb %s shell input tap 311 634' % a)
time.sleep(0.5)
os.system('adb %s shell input swipe 205 416 205 416 800' % a)
os.system('adb %s shell input tap 102 255' % a)
time.sleep(0.5)
# back, copy main text
os.system("adb %s shell monkey -p com.android.browser -c android.intent.category.LAUNCHER 1" % a)
time.sleep(1)
os.system('adb %s shell input tap 130 1425' % a)
# red, main text
os.system("adb %s shell monkey -p com.xingin.xhs -c android.intent.category.LAUNCHER 1" % a)
time.sleep(1)
os.system('adb %s shell input swipe 228 796 228 796 800' % a)
os.system('adb %s shell input tap 116 591' % a)
def get_short():
# os.system("adb %s shell monkey -p com.android.browser -c android.intent.category.LAUNCHER 1" % a)
# time.sleep(8)
# os.system('adb %s shell input tap 400 1000' % a)
# time.sleep(0.7)
# os.system('adb %s shell input text "http://10.20.30.7:8000/demo/article"' % a)
# time.sleep(1)
# os.system('adb %s shell input tap 945 156' % a)
os.system('adb %s shell input swipe 235 711 235 711 800' % a)
time.sleep(1)
os.system('adb %s shell input tap 148 564' % a)
time.sleep(0.5)
os.system('adb %s shell input tap 250 808' % a)
time.sleep(10)
local_path = '/Users/ryan/Desktop/red/local_web/static/upload/'
# delete DS file for MAC users
if os.path.isfile(local_path + '.DS_Store'):
    print('DS_Store file detected.')
    os.remove(local_path + '.DS_Store')
    print('DS_Store file deleted!')
a = input('is this for Frank or Carol? pls enter first letter of the name to continue...')
if a == 'c':
a = '-s 927aaf0d0421'
elif a == 'f':
a = '-s a022d1760821'
else:
a = input('is this for Frank or Carol? pls enter first letter of the name to continue...')
phone_path = '/sdcard/DCIM/Camera'
loop_count = 0
# --- get short
get_short()
# ---open red
os.system("adb %s shell monkey -p com.xingin.xhs -c android.intent.category.LAUNCHER 1" % a)
time.sleep(5)
#os.system('adb %s shell input tap 544 1849' % a)
# sort file names (place photos with correct pattern)
pics = os.listdir(local_path)
pics.sort(reverse=True)
loop = len(pics)
# --- push photos
push_photos()
# add new thread
os.system('adb %s shell input tap 540 2150' % a)
time.sleep(1)
# -----adding photo
select_photos()
# next
os.system('adb %s shell input tap 935 2150' % a)
time.sleep(0.5)
# next again
os.system('adb %s shell input tap 990 170' % a)
time.sleep(0.5)
# ---main--- put text in thread boxes
# os.system('adb %s shell ime set com.android.adbkeyboard/.AdbIME' % a)
# text()
# os.system('adb %s shell ime set com.baidu.input_mi/.ImeService' % a)
text1()
# done & send
input()
os.system('adb %s shell input tap 580 2111' % a)
# clean cache
dir = '/Users/ryan/Desktop/red/local_web/static/upload/'
for f in os.listdir(dir):
os.remove(os.path.join(dir, f))
print('used photos deleted!')
dir = '/Users/ryan/Desktop/red/local_web/static/files/'
for f in os.listdir(dir):
os.remove(os.path.join(dir, f))
print('used jsons deleted!')
|
the-stack_0_23896 | from .framework import (
managed_history,
selenium_test,
SeleniumTestCase,
)
class PagesTestCase(SeleniumTestCase):
ensure_registered = True
@selenium_test
@managed_history
def test_simple_page_creation_edit_and_view(self):
# Upload a file to test embedded object stuff
test_path = self.get_filename("1.fasta")
self.perform_upload(test_path)
self.history_panel_wait_for_hid_ok(1)
self.navigate_to_pages()
self.screenshot("pages_grid")
name = self.create_page_and_edit(screenshot_name="pages_create_form")
self.screenshot("pages_editor_new")
self.driver.switch_to.frame(0)
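        # The page editor body is rendered inside an iframe (the WYM editor),
        # so the driver switches into frame 0 to type and back out afterwards.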
try:
self.components.pages.editor.wym_iframe_content.wait_for_and_send_keys("moo\n\n\ncow\n\n")
finally:
self.driver.switch_to.default_content()
self.components.pages.editor.embed_button.wait_for_and_click()
self.screenshot("pages_editor_embed_menu")
self.components.pages.editor.embed_dataset.wait_for_and_click()
saved_datasets_element = self.components.pages.editor.dataset_selector.wait_for_and_click()
self.screenshot("pages_editor_embed_dataset_dialog")
checkboxes = saved_datasets_element.find_elements_by_css_selector("input[type='checkbox']")
assert len(checkboxes) > 0
checkboxes[0].click()
self.components.pages.editor.embed_dialog_add_button.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
self.components.pages.editor.save.wait_for_and_click()
self.screenshot("pages_editor_saved")
self.home()
self.navigate_to_pages()
self.click_grid_popup_option(name, "View")
self.screenshot("pages_view_simple")
|
the-stack_0_23897 | """Certbot client API."""
import datetime
import logging
import platform
from typing import cast
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import warnings
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.rsa import generate_private_key
import josepy as jose
import OpenSSL
from acme import client as acme_client
from acme import crypto_util as acme_crypto_util
from acme import errors as acme_errors
from acme import messages
import certbot
from certbot import crypto_util
from certbot import errors
from certbot import util
from certbot._internal import account
from certbot._internal import auth_handler
from certbot._internal import cli
from certbot._internal import constants
from certbot._internal import eff
from certbot._internal import error_handler
from certbot._internal import storage
from certbot._internal.plugins import selection as plugin_selection
from certbot.compat import os
from certbot.display import ops as display_ops
from certbot.display import util as display_util
logger = logging.getLogger(__name__)
def acme_from_config_key(config, key, regr=None):
"Wrangle ACME client construction"
# TODO: Allow for other alg types besides RS256
net = acme_client.ClientNetwork(key, account=regr, verify_ssl=(not config.no_verify_ssl),
user_agent=determine_user_agent(config))
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
client = acme_client.BackwardsCompatibleClientV2(net, key, config.server)
if client.acme_version == 1:
logger.warning(
"Certbot is configured to use an ACMEv1 server (%s). ACMEv1 support is deprecated"
" and will soon be removed. See https://community.letsencrypt.org/t/143839 for "
"more information.", config.server)
return client
def determine_user_agent(config):
"""
Set a user_agent string in the config based on the choice of plugins.
(this wasn't knowable at construction time)
:returns: the client's User-Agent string
:rtype: `str`
"""
# WARNING: To ensure changes are in line with Certbot's privacy
# policy, talk to a core Certbot team member before making any
# changes here.
if config.user_agent is None:
ua = ("CertbotACMEClient/{0} ({1}; {2}{8}) Authenticator/{3} Installer/{4} "
"({5}; flags: {6}) Py/{7}")
if os.environ.get("CERTBOT_DOCS") == "1":
cli_command = "certbot"
os_info = "OS_NAME OS_VERSION"
python_version = "major.minor.patchlevel"
else:
cli_command = cli.cli_command
os_info = util.get_os_info_ua()
python_version = platform.python_version()
ua = ua.format(certbot.__version__, cli_command, os_info,
config.authenticator, config.installer, config.verb,
ua_flags(config), python_version,
"; " + config.user_agent_comment if config.user_agent_comment else "")
else:
ua = config.user_agent
return ua
def ua_flags(config):
"Turn some very important CLI flags into clues in the user agent."
if isinstance(config, DummyConfig):
return "FLAGS"
flags = []
if config.duplicate:
flags.append("dup")
if config.renew_by_default:
flags.append("frn")
if config.allow_subset_of_names:
flags.append("asn")
if config.noninteractive_mode:
flags.append("n")
hook_names = ("pre", "post", "renew", "manual_auth", "manual_cleanup")
hooks = [getattr(config, h + "_hook") for h in hook_names]
if any(hooks):
flags.append("hook")
return " ".join(flags)
class DummyConfig:
"Shim for computing a sample user agent."
def __init__(self):
self.authenticator = "XXX"
self.installer = "YYY"
self.user_agent = None
self.verb = "SUBCOMMAND"
def __getattr__(self, name):
"Any config properties we might have are None."
return None
def sample_user_agent():
"Document what this Certbot's user agent string will be like."
return determine_user_agent(DummyConfig())
def register(config, account_storage, tos_cb=None):
"""Register new account with an ACME CA.
This function takes care of generating fresh private key,
registering the account, optionally accepting CA Terms of Service
and finally saving the account. It should be called prior to
initialization of `Client`, unless account has already been created.
:param certbot.configuration.NamespaceConfig config: Client configuration.
:param .AccountStorage account_storage: Account storage where newly
registered account will be saved to. Save happens only after TOS
acceptance step, so any account private keys or
`.RegistrationResource` will not be persisted if `tos_cb`
returns ``False``.
:param tos_cb: If ACME CA requires the user to accept a Terms of
Service before registering account, client action is
necessary. For example, a CLI tool would prompt the user
acceptance. `tos_cb` must be a callable that should accept
`.RegistrationResource` and return a `bool`: ``True`` iff the
Terms of Service present in the contained
`.Registration.terms_of_service` is accepted by the client, and
``False`` otherwise. ``tos_cb`` will be called only if the
client action is necessary, i.e. when ``terms_of_service is not
None``. This argument is optional, if not supplied it will
default to automatic acceptance!
:raises certbot.errors.Error: In case of any client problems, in
particular registration failure, or unaccepted Terms of Service.
:raises acme.errors.Error: In case of any protocol problems.
:returns: Newly registered and saved account, as well as protocol
API handle (should be used in `Client` initialization).
:rtype: `tuple` of `.Account` and `acme.client.Client`
"""
# Log non-standard actions, potentially wrong API calls
if account_storage.find_all():
logger.info("There are already existing accounts for %s", config.server)
if config.email is None:
if not config.register_unsafely_without_email:
msg = ("No email was provided and "
"--register-unsafely-without-email was not present.")
logger.error(msg)
raise errors.Error(msg)
if not config.dry_run:
logger.debug("Registering without email!")
# If --dry-run is used, and there is no staging account, create one with no email.
if config.dry_run:
config.email = None
# Each new registration shall use a fresh new key
rsa_key = generate_private_key(
public_exponent=65537,
key_size=config.rsa_key_size,
backend=default_backend())
key = jose.JWKRSA(key=jose.ComparableRSAKey(rsa_key))
acme = acme_from_config_key(config, key)
# TODO: add phone?
regr = perform_registration(acme, config, tos_cb)
acc = account.Account(regr, key)
account_storage.save(acc, acme)
eff.prepare_subscription(config, acc)
return acc, acme
def perform_registration(acme, config, tos_cb):
"""
Actually register new account, trying repeatedly if there are email
problems
:param acme.client.Client client: ACME client object.
:param certbot.configuration.NamespaceConfig config: Client configuration.
:param Callable tos_cb: a callback to handle Term of Service agreement.
:returns: Registration Resource.
:rtype: `acme.messages.RegistrationResource`
"""
eab_credentials_supplied = config.eab_kid and config.eab_hmac_key
eab: Optional[Dict[str, Any]]
if eab_credentials_supplied:
account_public_key = acme.client.net.key.public_key()
eab = messages.ExternalAccountBinding.from_data(account_public_key=account_public_key,
kid=config.eab_kid,
hmac_key=config.eab_hmac_key,
directory=acme.client.directory)
else:
eab = None
if acme.external_account_required():
if not eab_credentials_supplied:
msg = ("Server requires external account binding."
" Please use --eab-kid and --eab-hmac-key.")
raise errors.Error(msg)
try:
# TODO: Remove the cast once certbot package is fully typed
newreg = messages.NewRegistration.from_data(
email=config.email,
external_account_binding=cast(Optional[messages.ExternalAccountBinding], eab))
return acme.new_account_and_tos(newreg, tos_cb)
except messages.Error as e:
if e.code == "invalidEmail" or e.code == "invalidContact":
if config.noninteractive_mode:
msg = ("The ACME server believes %s is an invalid email address. "
"Please ensure it is a valid email and attempt "
"registration again." % config.email)
raise errors.Error(msg)
config.email = display_ops.get_email(invalid=True)
return perform_registration(acme, config, tos_cb)
raise
class Client:
"""Certbot's client.
:ivar certbot.configuration.NamespaceConfig config: Client configuration.
:ivar .Account account: Account registered with `register`.
:ivar .AuthHandler auth_handler: Authorizations handler that will
dispatch DV challenges to appropriate authenticators
(providing `.Authenticator` interface).
:ivar .Authenticator auth: Prepared (`.Authenticator.prepare`)
authenticator that can solve ACME challenges.
:ivar .Installer installer: Installer.
:ivar acme.client.BackwardsCompatibleClientV2 acme: Optional ACME
client API handle. You might already have one from `register`.
"""
def __init__(self, config, account_, auth, installer, acme=None):
"""Initialize a client."""
self.config = config
self.account = account_
self.auth = auth
self.installer = installer
# Initialize ACME if account is provided
if acme is None and self.account is not None:
acme = acme_from_config_key(config, self.account.key, self.account.regr)
self.acme = acme
self.auth_handler: Optional[auth_handler.AuthHandler]
if auth is not None:
self.auth_handler = auth_handler.AuthHandler(
auth, self.acme, self.account, self.config.pref_challs)
else:
self.auth_handler = None
def obtain_certificate_from_csr(self, csr, orderr=None):
"""Obtain certificate.
:param .util.CSR csr: PEM-encoded Certificate Signing
Request. The key used to generate this CSR can be different
than `authkey`.
:param acme.messages.OrderResource orderr: contains authzrs
:returns: certificate and chain as PEM byte strings
:rtype: tuple
"""
if self.auth_handler is None:
msg = ("Unable to obtain certificate because authenticator is "
"not set.")
logger.error(msg)
raise errors.Error(msg)
if self.account.regr is None:
raise errors.Error("Please register with the ACME server first.")
logger.debug("CSR: %s", csr)
if orderr is None:
orderr = self._get_order_and_authorizations(csr.data, best_effort=False)
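        # Allow the ACME server up to 90 seconds to finalize the order.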
deadline = datetime.datetime.now() + datetime.timedelta(seconds=90)
get_alt_chains = self.config.preferred_chain is not None
orderr = self.acme.finalize_order(orderr, deadline,
fetch_alternative_chains=get_alt_chains)
fullchain = orderr.fullchain_pem
if get_alt_chains and orderr.alternative_fullchains_pem:
fullchain = crypto_util.find_chain_with_issuer([fullchain] + \
orderr.alternative_fullchains_pem,
self.config.preferred_chain,
not self.config.dry_run)
cert, chain = crypto_util.cert_and_chain_from_fullchain(fullchain)
return cert.encode(), chain.encode()
def obtain_certificate(self, domains, old_keypath=None):
"""Obtains a certificate from the ACME server.
`.register` must be called before `.obtain_certificate`
:param list domains: domains to get a certificate
:returns: certificate as PEM string, chain as PEM string,
newly generated private key (`.util.Key`), and DER-encoded
Certificate Signing Request (`.util.CSR`).
:rtype: tuple
"""
# We need to determine the key path, key PEM data, CSR path,
# and CSR PEM data. For a dry run, the paths are None because
# they aren't permanently saved to disk. For a lineage with
# --reuse-key, the key path and PEM data are derived from an
# existing file.
if old_keypath is not None:
# We've been asked to reuse a specific existing private key.
# Therefore, we'll read it now and not generate a new one in
# either case below.
#
# We read in bytes here because the type of `key.pem`
# created below is also bytes.
with open(old_keypath, "rb") as f:
keypath = old_keypath
keypem = f.read()
key: Optional[util.Key] = util.Key(file=keypath, pem=keypem)
logger.info("Reusing existing private key from %s.", old_keypath)
else:
# The key is set to None here but will be created below.
key = None
key_size = self.config.rsa_key_size
elliptic_curve = "secp256r1"
# key-type defaults to a list, but we are only handling 1 currently
if isinstance(self.config.key_type, list):
self.config.key_type = self.config.key_type[0]
if self.config.elliptic_curve and self.config.key_type == 'ecdsa':
elliptic_curve = self.config.elliptic_curve
self.config.auth_chain_path = "./chain-ecdsa.pem"
self.config.auth_cert_path = "./cert-ecdsa.pem"
self.config.key_path = "./key-ecdsa.pem"
elif self.config.rsa_key_size and self.config.key_type.lower() == 'rsa':
key_size = self.config.rsa_key_size
# Create CSR from names
if self.config.dry_run:
key = key or util.Key(
file=None,
pem=crypto_util.make_key(
bits=key_size,
elliptic_curve=elliptic_curve,
key_type=self.config.key_type,
),
)
csr = util.CSR(file=None, form="pem",
data=acme_crypto_util.make_csr(
key.pem, domains, self.config.must_staple))
else:
key = key or crypto_util.generate_key(
key_size=key_size,
key_dir=self.config.key_dir,
key_type=self.config.key_type,
elliptic_curve=elliptic_curve,
strict_permissions=self.config.strict_permissions,
)
csr = crypto_util.generate_csr(key, domains, self.config.csr_dir,
self.config.must_staple, self.config.strict_permissions)
orderr = self._get_order_and_authorizations(csr.data, self.config.allow_subset_of_names)
authzr = orderr.authorizations
auth_domains = set(a.body.identifier.value for a in authzr)
successful_domains = [d for d in domains if d in auth_domains]
# allow_subset_of_names is currently disabled for wildcard
# certificates. The reason for this and checking allow_subset_of_names
# below is because successful_domains == domains is never true if
# domains contains a wildcard because the ACME spec forbids identifiers
# in authzs from containing a wildcard character.
if self.config.allow_subset_of_names and successful_domains != domains:
if not self.config.dry_run:
os.remove(key.file)
os.remove(csr.file)
return self.obtain_certificate(successful_domains)
else:
cert, chain = self.obtain_certificate_from_csr(csr, orderr)
return cert, chain, key, csr
def _get_order_and_authorizations(self, csr_pem: str,
best_effort: bool) -> messages.OrderResource:
"""Request a new order and complete its authorizations.
:param str csr_pem: A CSR in PEM format.
:param bool best_effort: True if failing to complete all
authorizations should not raise an exception
:returns: order resource containing its completed authorizations
:rtype: acme.messages.OrderResource
"""
try:
orderr = self.acme.new_order(csr_pem)
except acme_errors.WildcardUnsupportedError:
raise errors.Error("The currently selected ACME CA endpoint does"
" not support issuing wildcard certificates.")
if not self.auth_handler:
raise errors.Error("No authorization handler has been set.")
# For a dry run, ensure we have an order with fresh authorizations
if orderr and self.config.dry_run:
deactivated, failed = self.auth_handler.deactivate_valid_authorizations(orderr)
if deactivated:
logger.debug("Recreating order after authz deactivations")
orderr = self.acme.new_order(csr_pem)
if failed:
logger.warning("Certbot was unable to obtain fresh authorizations for every domain"
". The dry run will continue, but results may not be accurate.")
authzr = self.auth_handler.handle_authorizations(orderr, self.config, best_effort)
return orderr.update(authorizations=authzr)
def obtain_and_enroll_certificate(self, domains, certname):
"""Obtain and enroll certificate.
Get a new certificate for the specified domains using the specified
authenticator and installer, and then create a new renewable lineage
containing it.
:param domains: domains to request a certificate for
:type domains: `list` of `str`
:param certname: requested name of lineage
:type certname: `str` or `None`
:returns: A new :class:`certbot._internal.storage.RenewableCert` instance
referred to the enrolled cert lineage, False if the cert could not
be obtained, or None if doing a successful dry run.
"""
cert, chain, key, _ = self.obtain_certificate(domains)
if (self.config.config_dir != constants.CLI_DEFAULTS["config_dir"] or
self.config.work_dir != constants.CLI_DEFAULTS["work_dir"]):
logger.info(
"Non-standard path(s), might not work with crontab installed "
"by your operating system package manager")
new_name = self._choose_lineagename(domains, certname)
if self.config.dry_run:
logger.debug("Dry run: Skipping creating new lineage for %s",
new_name)
return None
return storage.RenewableCert.new_lineage(
new_name, cert,
key.pem, chain,
self.config)
def _choose_lineagename(self, domains, certname):
"""Chooses a name for the new lineage.
:param domains: domains in certificate request
:type domains: `list` of `str`
:param certname: requested name of lineage
:type certname: `str` or `None`
:returns: lineage name that should be used
:rtype: str
"""
if certname:
return certname
elif util.is_wildcard_domain(domains[0]):
# Don't make files and directories starting with *.
return domains[0][2:]
return domains[0]
def save_certificate(self, cert_pem, chain_pem,
cert_path, chain_path, fullchain_path):
"""Saves the certificate received from the ACME server.
:param str cert_pem:
:param str chain_pem:
:param str cert_path: Candidate path to a certificate.
:param str chain_path: Candidate path to a certificate chain.
:param str fullchain_path: Candidate path to a full cert chain.
:returns: cert_path, chain_path, and fullchain_path as absolute
paths to the actual files
:rtype: `tuple` of `str`
:raises IOError: If unable to find room to write the cert files
"""
for path in cert_path, chain_path, fullchain_path:
util.make_or_verify_dir(os.path.dirname(path), 0o755, self.config.strict_permissions)
cert_file, abs_cert_path = _open_pem_file('cert_path', cert_path)
try:
cert_file.write(cert_pem)
finally:
cert_file.close()
chain_file, abs_chain_path =\
_open_pem_file('chain_path', chain_path)
fullchain_file, abs_fullchain_path =\
_open_pem_file('fullchain_path', fullchain_path)
_save_chain(chain_pem, chain_file)
_save_chain(cert_pem + chain_pem, fullchain_file)
return abs_cert_path, abs_chain_path, abs_fullchain_path
def deploy_certificate(self, domains, privkey_path, cert_path, chain_path, fullchain_path):
"""Install certificate
:param list domains: list of domains to install the certificate
:param str privkey_path: path to certificate private key
:param str cert_path: certificate file path (optional)
:param str chain_path: chain file path
"""
if self.installer is None:
logger.error("No installer specified, client is unable to deploy"
"the certificate")
raise errors.Error("No installer available")
chain_path = None if chain_path is None else os.path.abspath(chain_path)
display_util.notify("Deploying certificate")
msg = "Could not install certificate"
with error_handler.ErrorHandler(self._recovery_routine_with_msg, msg):
for dom in domains:
self.installer.deploy_cert(
domain=dom, cert_path=os.path.abspath(cert_path),
key_path=os.path.abspath(privkey_path),
chain_path=chain_path,
fullchain_path=fullchain_path)
self.installer.save() # needed by the Apache plugin
self.installer.save("Deployed ACME Certificate")
msg = ("We were unable to install your certificate, "
"however, we successfully restored your "
"server to its prior configuration.")
with error_handler.ErrorHandler(self._rollback_and_restart, msg):
# sites may have been enabled / final cleanup
self.installer.restart()
def enhance_config(self, domains, chain_path, redirect_default=True):
"""Enhance the configuration.
:param list domains: list of domains to configure
:param chain_path: chain file path
:type chain_path: `str` or `None`
:param redirect_default: boolean value that the "redirect" flag should default to
:raises .errors.Error: if no installer is specified in the
client.
"""
if self.installer is None:
logger.error("No installer is specified, there isn't any "
"configuration to enhance.")
raise errors.Error("No installer available")
enhanced = False
enhancement_info = (
("hsts", "ensure-http-header", "Strict-Transport-Security"),
("redirect", "redirect", None),
("staple", "staple-ocsp", chain_path),
("uir", "ensure-http-header", "Upgrade-Insecure-Requests"),)
supported = self.installer.supported_enhancements()
for config_name, enhancement_name, option in enhancement_info:
config_value = getattr(self.config, config_name)
if enhancement_name in supported:
if config_name == "redirect" and config_value is None:
config_value = redirect_default
if config_value:
self.apply_enhancement(domains, enhancement_name, option)
enhanced = True
elif config_value:
logger.error(
"Option %s is not supported by the selected installer. "
"Skipping enhancement.", config_name)
msg = ("We were unable to restart web server")
if enhanced:
with error_handler.ErrorHandler(self._rollback_and_restart, msg):
self.installer.restart()
def apply_enhancement(self, domains: List[str], enhancement: str,
options: Optional[Union[List[str], str]] = None) -> None:
"""Applies an enhancement on all domains.
:param list domains: list of ssl_vhosts (as strings)
:param str enhancement: name of enhancement, e.g. ensure-http-header
:param str options: options to enhancement, e.g. Strict-Transport-Security
.. note:: When more `options` are needed, make options a list.
:raises .errors.PluginError: If Enhancement is not supported, or if
there is any other problem with the enhancement.
"""
enh_label = options if enhancement == "ensure-http-header" else enhancement
with error_handler.ErrorHandler(self._recovery_routine_with_msg, None):
for dom in domains:
try:
self.installer.enhance(dom, enhancement, options)
except errors.PluginEnhancementAlreadyPresent:
logger.info("Enhancement %s was already set.", enh_label)
except errors.PluginError:
logger.error("Unable to set the %s enhancement for %s.", enh_label, dom)
raise
self.installer.save(f"Add enhancement {enh_label}")
def _recovery_routine_with_msg(self, success_msg: Optional[str]) -> None:
"""Calls the installer's recovery routine and prints success_msg
:param str success_msg: message to show on successful recovery
"""
self.installer.recovery_routine()
if success_msg:
display_util.notify(success_msg)
def _rollback_and_restart(self, success_msg):
"""Rollback the most recent checkpoint and restart the webserver
:param str success_msg: message to show on successful rollback
"""
logger.info("Rolling back to previous server configuration...")
try:
self.installer.rollback_checkpoints()
self.installer.restart()
except:
logger.error(
"An error occurred and we failed to restore your config and "
"restart your server. Please post to "
"https://community.letsencrypt.org/c/help "
"with details about your configuration and this error you received."
)
raise
display_util.notify(success_msg)
def validate_key_csr(privkey, csr=None):
"""Validate Key and CSR files.
Verifies that the client key and csr arguments are valid and correspond to
one another. This does not currently check the names in the CSR due to
the inability to read SANs from CSRs in python crypto libraries.
If csr is left as None, only the key will be validated.
:param privkey: Key associated with CSR
:type privkey: :class:`certbot.util.Key`
:param .util.CSR csr: CSR
:raises .errors.Error: when validation fails
"""
# TODO: Handle all of these problems appropriately
# The client can eventually do things like prompt the user
# and allow the user to take more appropriate actions
# Key must be readable and valid.
if privkey.pem and not crypto_util.valid_privkey(privkey.pem):
raise errors.Error("The provided key is not a valid key")
if csr:
if csr.form == "der":
csr_obj = OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, csr.data)
cert_buffer = OpenSSL.crypto.dump_certificate_request(
OpenSSL.crypto.FILETYPE_PEM, csr_obj
)
csr = util.CSR(csr.file, cert_buffer, "pem")
# If CSR is provided, it must be readable and valid.
if csr.data and not crypto_util.valid_csr(csr.data):
raise errors.Error("The provided CSR is not a valid CSR")
# If both CSR and key are provided, the key must be the same key used
# in the CSR.
if csr.data and privkey.pem:
if not crypto_util.csr_matches_pubkey(
csr.data, privkey.pem):
raise errors.Error("The key and CSR do not match")
def rollback(default_installer, checkpoints, config, plugins):
"""Revert configuration the specified number of checkpoints.
:param int checkpoints: Number of checkpoints to revert.
:param config: Configuration.
:type config: :class:`certbot.configuration.NamespaceConfiguration`
"""
    # Misconfigurations are only slight problems... allow the user to roll back
installer = plugin_selection.pick_installer(
config, default_installer, plugins, question="Which installer "
"should be used for rollback?")
# No Errors occurred during init... proceed normally
# If installer is None... couldn't find an installer... there shouldn't be
# anything to rollback
if installer is not None:
installer.rollback_checkpoints(checkpoints)
installer.restart()
def _open_pem_file(cli_arg_path, pem_path):
"""Open a pem file.
If cli_arg_path was set by the client, open that.
Otherwise, uniquify the file path.
:param str cli_arg_path: the cli arg name, e.g. cert_path
:param str pem_path: the pem file path to open
:returns: a tuple of file object and its absolute file path
"""
if cli.set_by_cli(cli_arg_path):
return util.safe_open(pem_path, chmod=0o644, mode="wb"),\
os.path.abspath(pem_path)
uniq = util.unique_file(pem_path, 0o644, "wb")
return uniq[0], os.path.abspath(uniq[1])
def _save_chain(chain_pem, chain_file):
"""Saves chain_pem at a unique path based on chain_path.
:param str chain_pem: certificate chain in PEM format
:param str chain_file: chain file object
"""
try:
chain_file.write(chain_pem)
finally:
chain_file.close()
|
the-stack_0_23899 | # python irc bot
# based on a tutorial from: https://linuxacademy.com/blog/linux-academy/creating-an-irc-bot-with-python3/
import socket
import time
# my files
import getweather
import getdate
import getfortune
import getskdtheme
import random
import time
from getcovid import getCovidData
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server = "chat.freenode.net"
# channel = "#bot-testing"
channel = '#sketchdaily'
botnick = "nizz" # The bot's nickname
adminname = "ThereIsNoJustice" # My IRC nickname - change this to your username
exitcode = "bye " + botnick
def joinchannel(chan):
ircsock.send(bytes("JOIN " + chan + "\n", "UTF-8"))
ircmsg = ""
while ircmsg.find("End of /NAMES list.") == -1:
ircmsg = ircsock.recv(2048).decode("UTF-8")
ircmsg = ircmsg.strip('\n\r')
if (ircmsg.strip() != ""):
print(ircmsg)
print(" > > > Channel successfully joined")
def ping(): # respond to server Pings
ircsock.send(bytes("PONG :pingisn\n", "UTF-8"))
# ircsock.send(bytes("PONG :pingisn", "UTF-8"))
def sendmsg(msg, target=channel): # sends messages to the target
ircsock.send(bytes("PRIVMSG " + target + " :" + msg + "\n", "UTF-8"))
def main():
# getfortune.loadfortunes()
print(" > > > Beginning IRC bot")
# connect to the server using the port 6667 (the standard IRC port)
ircsock.connect((server, 6667))
ircsock.send(bytes("USER " + botnick + " " + botnick +
" " + botnick + " " + botnick + "\n", "UTF-8"))
# assign the nick to the bot
ircsock.send(bytes("NICK " + botnick + "\n", "UTF-8"))
print(" > > > Server joined")
joinchannel(channel)
while 1:
ircmsg = ircsock.recv(2048).decode("UTF-8")
ircmsg = ircmsg.strip('\n\r')
if (ircmsg.strip() != ""):
print(ircmsg)
# TODO: Use eval('text') to run code as a file that can be stopped without having to rejoin the IRC server
if ircmsg.find("PRIVMSG") != -1:
name = ircmsg.split('!', 1)[0][1:]
message = ircmsg.split('PRIVMSG', 1)[1].split(':', 1)[1].lower()
if len(name) < 17:
# respond to 'hi <botname>'
if message.find('hi ' + botnick.lower()) != -1 or message.find('hello ' + botnick.lower()) != -1 or message.find('hey ' + botnick.lower()) != -1:
sendmsg("Hello " + name + "!")
elif name.lower() == adminname.lower() and message.rstrip() == exitcode: # quit with <exitcode>
sendmsg("oh...okay. :-/")
ircsock.send(bytes("QUIT\n", "UTF-8"))
return
elif message.find(botnick.lower()) != -1:
sendmsg("╚═།-◑-▃-◑-།═╝ beep boop")
# use '.tell' to send someone a message
if message.find('.tell') != -1:
target = message.split(' ', 1)[1]
if target.find(' ') != -1:
message = target.split(' ', 1)[1]
target = target.split(' ')[0]
else:
target = name
message = "Could not parse. The message should be in the format of ‘.tell [target] [message]’ to work properly."
sendmsg(message, target)
# TODO: Make a table of 'name's (usernames) and additional corresponding info?
if message.find('.date') != -1:
print("printing date")
sendmsg(getdate.printdaynumber())
if message.find(".dodongo") != -1:
sendmsg("!lol dodongo")
if message.find(".ftoc") == 0:
try:
f = int(message.split(' ')[1])
c = (f-32)/1.8
c = format(c, ',.2f')
sendmsg(f"{f}f is {c}c")
except:
sendmsg("something went wrong")
if message.find(".ctof") == 0:
try:
c = int(message.split(' ')[1])
f = (c * 1.8) + 32
f = format(f, ',.2f')
sendmsg(f"{c}c is {f}f")
except:
sendmsg("something went wrong")
if message.find(".choose") == 0:
msgArrSplit = message.split(' ')
msgArrSplit.pop(0)
msgArrJoined = ' '.join(msgArrSplit)
msgArrCommaSplit = msgArrJoined.split(', ')
print(msgArrSplit)
if len(msgArrCommaSplit) == 1:
yesNos = ["yeah do it", "well maybe",
"no i don't think so", "it's probably fine"]
sendmsg(random.choice(yesNos))
elif len(msgArrCommaSplit) > 1:
print(msgArrCommaSplit)
chosen = random.choice(msgArrCommaSplit)
preMsg = random.choice(
["i like this one", "sounds cool", "the best", "be a good human", "embrace obedience to your robot masters"])
messageToSend = f"{preMsg}: {chosen}"
print(messageToSend)
sendmsg(messageToSend)
else:
sendmsg("you need to give me choices!!")
# if message.find(".fortune") != -1:
# print("printing fortune")
# sendmsg(getfortune.printrandomfortune())
if message.find('.getskdtheme') != -1:
print('printing skd theme')
sendmsg(getskdtheme.printskdtheme())
if message.find('.hotdog') != -1:
print('printing a hotdog')
sendmsg('( ´∀`)つ―⊂ZZZ⊃')
if message.find('.covid') != -1:
splitMsg = message.split(' ')
if len(splitMsg) > 1:
try:
zipcode = int(splitMsg[1])
reqDict = {"type": "zip", "code": zipcode}
sendmsg(getCovidData(reqDict))
except:
try:
countrycode = splitMsg[1].upper()
reqDict = {"type": "countrycode",
"code": countrycode}
sendmsg(getCovidData(reqDict))
except:
sendmsg('Something went wrong')
else:
sendmsg('.covid <zipcode/countrycode>')
if message.find(".weather") != -1: # TODO - .weather <place>
print("printing weather")
splitmsg = message.split(' ')
lat = 0
lon = 0
if len(splitmsg) == 3:
try:
lat = int(splitmsg[1])
lon = int(splitmsg[2])
sendmsg(getweather.printweather(lat, lon))
except ValueError:
sendmsg("I couldn't do that!")
else:
sendmsg('.weather <latitude> <longitude>')
# list of commands
if message.find('.help') != -1:
sendmsg("COMMANDS: Hi .date .fortune .getskdtheme .weather")
else:
if ircmsg.find("PING :") != -1:
ping()
main()
|
the-stack_0_23901 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (no not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize
from maskrcnn_benchmark.utils.logging import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
parser.add_argument(
"--config-file",
default="/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
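    # WORLD_SIZE is set by torch.distributed.launch; when it is absent, run
    # single-process (single-GPU) inference.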
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
distributed = num_gpus > 1
if distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.deprecated.init_process_group(
backend="nccl", init_method="env://"
)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
save_dir = ""
logger = setup_logger("maskrcnn_benchmark", save_dir, args.local_rank)
logger.info("Using {} GPUs".format(num_gpus))
logger.info(cfg)
logger.info("Collecting env info (might take some time)")
logger.info("\n" + collect_env_info())
model = build_detection_model(cfg)
model.to(cfg.MODEL.DEVICE)
checkpointer = DetectronCheckpointer(cfg, model)
_ = checkpointer.load(cfg.MODEL.WEIGHT)
iou_types = ("bbox",)
if cfg.MODEL.MASK_ON:
iou_types = iou_types + ("segm",)
output_folders = [None] * len(cfg.DATASETS.TEST)
if cfg.OUTPUT_DIR:
dataset_names = cfg.DATASETS.TEST
for idx, dataset_name in enumerate(dataset_names):
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
mkdir(output_folder)
output_folders[idx] = output_folder
data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
for output_folder, data_loader_val in zip(output_folders, data_loaders_val):
inference(
model,
data_loader_val,
iou_types=iou_types,
box_only=cfg.MODEL.RPN_ONLY,
device=cfg.MODEL.DEVICE,
expected_results=cfg.TEST.EXPECTED_RESULTS,
expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
output_folder=output_folder,
)
synchronize()
if __name__ == "__main__":
main()
|
the-stack_0_23902 | import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('static_replace', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AssetExcludedExtensionsConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('excluded_extensions', models.TextField(default='html', help_text='The file extensions to exclude from canonicalization. No leading period required. Values should be space separated i.e. "html svg css"')),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
),
]
|
the-stack_0_23903 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
import os, sys, time, subprocess, json, requests
HEADERS = {
'Content-type': 'application/json',
'Accept': 'application/json',
}
path = 'test.fastq'
FILE_URL = 'http://test.encodedcc.org/TSTFF867178/upload/'
ENCODED_KEY = '...'
ENCODED_SECRET_KEY = '...'
response = requests.get(FILE_URL, headers=HEADERS, auth=(ENCODED_KEY, ENCODED_SECRET_KEY))
try:
response.raise_for_status()
except:
print('File object GET failed')
raise
item = response.json()['@graph'][0]
print(json.dumps(item, indent=4, sort_keys=True))
creds = item['upload_credentials']
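# Hand the temporary S3 upload credentials to the aws CLI via its environment.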
env = os.environ.copy()
env.update({
'AWS_ACCESS_KEY_ID': creds['access_key'],
'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
'AWS_SECURITY_TOKEN': creds['session_token'],
})
# ~10s/GB from Stanford - AWS Oregon
# ~12-15s/GB from AWS Ireland - AWS Oregon
print("Uploading file.")
start = time.time()
try:
subprocess.check_call(['aws', 's3', 'cp', path, creds['upload_url']], env=env)
except subprocess.CalledProcessError as e:
# The aws command returns a non-zero exit code on error.
print("Upload failed with exit code %d" % e.returncode)
sys.exit(e.returncode)
else:
end = time.time()
duration = end - start
print("Uploaded in %.2f seconds" % duration)
|
the-stack_0_23904 | import pandas as pd
import geopandas as gpd
from region_estimators.region_estimator import RegionEstimator
class DistanceSimpleEstimator(RegionEstimator):
def __init__(self, estimation_data=None, verbose=RegionEstimator.VERBOSE_DEFAULT,
max_processors=RegionEstimator.MAX_NUM_PROCESSORS,
progress_callback=None):
super(DistanceSimpleEstimator, self).__init__(estimation_data, verbose, max_processors, progress_callback)
class Factory:
def create(self, estimation_data=None, verbose=RegionEstimator.VERBOSE_DEFAULT,
max_processors=RegionEstimator.MAX_NUM_PROCESSORS, progress_callback=None):
return DistanceSimpleEstimator(estimation_data, verbose, max_processors, progress_callback)
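    # Hypothetical usage sketch (the measurement/timestamp/region values below
    # are illustrative placeholders, not from this module):
    #   est = DistanceSimpleEstimator.Factory().create(estimation_data)
    #   value, extra = est.get_estimate('no2', '2017-07-01', 'region_1')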
def get_estimate(self, measurement, timestamp, region_id, ignore_site_ids=[]):
""" Find estimations for a region and timestamp using the simple distance method: value of closest actual site
:param measurement: measurement to be estimated (string, required)
:param timestamp: timestamp identifier (string)
:param region_id: region identifier (string)
:param ignore_site_ids: site id(s) to be ignored during the estimations
:return: tuple containing
i) estimate
ii) dict: {"closest_sites": [IDs of closest site(s)]}
"""
result = None, {'closest_site_data': None}
# Get the actual values
df_actuals = self.actuals.loc[
(~self.actuals['site_id'].isin(ignore_site_ids)) &
(self.actuals['timestamp'] == timestamp) &
(self.actuals[measurement].notnull())
]
df_sites = self.sites.reset_index()
df_actuals = pd.merge(left=df_actuals,
right= df_sites,
on='site_id',
how='left')
gdf_actuals = gpd.GeoDataFrame(data=df_actuals, geometry='geometry')
# Get the closest site to the region
if len(gdf_actuals) > 0:
# Get region geometry
df_reset = pd.DataFrame(self.regions.reset_index())
regions_temp = df_reset.loc[df_reset['region_id'] == region_id]
if len(regions_temp.index) > 0:
region = regions_temp.iloc[0]
# Calculate distances
gdf_actuals['distance'] = pd.DataFrame(gdf_actuals['geometry'].distance(region.geometry))
# Get site(s) with shortest distance
top_result = gdf_actuals.sort_values(by=['distance'], ascending=True).iloc[0] #returns the whole row as a series
if top_result is not None:
# Take the average of all sites with the closest distance
closest_sites = gdf_actuals.loc[gdf_actuals['distance'] == top_result['distance']]
closest_values_mean = closest_sites[measurement].mean(axis=0)
# In extra data, return closest site name if it exists, otherwise closest site id
if 'name' in list(closest_sites.columns):
closest_sites_result = list(closest_sites['name'])
else:
closest_sites_result = list(closest_sites['site_id'])
result = closest_values_mean, {"closest_sites": closest_sites_result}
return result
|
the-stack_0_23906 | import os
import sys
from eventlet import patcher
from eventlet.support import six
select = patcher.original('select')
time = patcher.original('time')
sleep = time.sleep
from eventlet.support import get_errno, clear_sys_exc_info
from eventlet.hubs.hub import BaseHub, READ, WRITE, noop
if getattr(select, 'kqueue', None) is None:
raise ImportError('No kqueue implementation found in select module')
FILTERS = {READ: select.KQ_FILTER_READ,
WRITE: select.KQ_FILTER_WRITE}
class Hub(BaseHub):
MAX_EVENTS = 100
def __init__(self, clock=time.time):
super(Hub, self).__init__(clock)
self._events = {}
self._init_kqueue()
def _init_kqueue(self):
self.kqueue = select.kqueue()
self._pid = os.getpid()
def _reinit_kqueue(self):
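        # Rebuild the kqueue fd (used after a fork, when the inherited fd no
        # longer works) and re-register every event we were tracking.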
self.kqueue.close()
self._init_kqueue()
kqueue = self.kqueue
events = [e for i in six.itervalues(self._events)
for e in six.itervalues(i)]
kqueue.control(events, 0, 0)
def _control(self, events, max_events, timeout):
try:
return self.kqueue.control(events, max_events, timeout)
except (OSError, IOError):
# have we forked?
if os.getpid() != self._pid:
self._reinit_kqueue()
return self.kqueue.control(events, max_events, timeout)
raise
def add(self, evtype, fileno, cb):
listener = super(Hub, self).add(evtype, fileno, cb)
events = self._events.setdefault(fileno, {})
if evtype not in events:
try:
event = select.kevent(fileno,
FILTERS.get(evtype), select.KQ_EV_ADD)
self._control([event], 0, 0)
events[evtype] = event
except ValueError:
super(Hub, self).remove(listener)
raise
return listener
def _delete_events(self, events):
del_events = [select.kevent(e.ident, e.filter, select.KQ_EV_DELETE)
for e in events]
self._control(del_events, 0, 0)
def remove(self, listener):
super(Hub, self).remove(listener)
evtype = listener.evtype
fileno = listener.fileno
if not self.listeners[evtype].get(fileno):
event = self._events[fileno].pop(evtype)
try:
self._delete_events([event])
except OSError as e:
pass
def remove_descriptor(self, fileno):
super(Hub, self).remove_descriptor(fileno)
try:
events = self._events.pop(fileno).values()
self._delete_events(events)
except KeyError as e:
pass
except OSError as e:
pass
def wait(self, seconds=None):
readers = self.listeners[READ]
writers = self.listeners[WRITE]
if not readers and not writers:
if seconds:
sleep(seconds)
return
result = self._control([], self.MAX_EVENTS, seconds)
SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
for event in result:
fileno = event.ident
evfilt = event.filter
try:
if evfilt == FILTERS[READ]:
readers.get(fileno, noop).cb(fileno)
if evfilt == FILTERS[WRITE]:
writers.get(fileno, noop).cb(fileno)
except SYSTEM_EXCEPTIONS:
raise
except:
self.squelch_exception(fileno, sys.exc_info())
clear_sys_exc_info()
|
the-stack_0_23907 | from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.views import APIView
from django_pro1_iot.iotdevices.models import Devices
from .serializers import ListRoomsSerializer, IotDevicesSerializer
from rest_framework.permissions import IsAuthenticated
from django_pro1_iot.rooms.models import Room
from rest_framework.response import Response
from rest_framework import status, viewsets
import paho.mqtt.client as mqttclient
import time
class List_Rooms(APIView):
permission_classes = [IsAuthenticated]
def get(self, request):
rooms = Room.objects.filter(user=request.user)
serializer = ListRoomsSerializer(rooms, many=True, context={"request": request})
return Response(serializer.data)
def post(self, request):
serializer = ListRoomsSerializer(data=request.data)
if serializer.is_valid():
serializer.save(user=request.user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class Detail_Room(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, room_pk):
try:
user = request.user
room = Room.objects.get(pk=room_pk, user=user)
except Room.DoesNotExist:
return Response(
{"error": "Room does not exist ya sa7by"},
status=status.HTTP_404_NOT_FOUND,
)
serializer = ListRoomsSerializer(room, context={"request": request})
return Response(serializer.data, status=status.HTTP_302_FOUND)
def put(self, request, room_pk):
try:
room = Room.objects.get(pk=room_pk, user=request.user)
except Room.DoesNotExist:
return Response(
{"error": "Room does not exist ya 7oby"},
status=status.HTTP_404_NOT_FOUND,
)
serializer = ListRoomsSerializer(room, data=request.data)
if serializer.is_valid():
serializer.save(user=request.user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, room_pk):
try:
room = Room.objects.get(pk=room_pk, user=request.user)
except Room.DoesNotExist:
return Response(
{"error": "Room does not exist ya sa7by"},
status=status.HTTP_404_NOT_FOUND,
)
room.delete()
return Response(
{"deleted successfuly": "delleted yas sa7by"},
status=status.HTTP_204_NO_CONTENT,
)
class List_Devices(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, room_pk):
try:
user = request.user
room = Room.objects.get(pk=room_pk, user=user)
devices = Devices.objects.filter(room=room)
except (Room.DoesNotExist, Devices.DoesNotExist):
return Response(
{"error": "Room does not exist ya sa7by"},
status=status.HTTP_404_NOT_FOUND,
)
serializer = IotDevicesSerializer(
devices, many=True, context={"request": request}
)
return Response(serializer.data, status=status.HTTP_302_FOUND)
def post(self, request, room_pk):
serializer = IotDevicesSerializer(data=request.data)
try:
room = Room.objects.get(pk=room_pk, user=request.user)
except (Room.DoesNotExist, Devices.DoesNotExist):
return Response(
{"error": "Room does not exist ya sa7by"},
status=status.HTTP_404_NOT_FOUND,
)
if serializer.is_valid():
serializer.save(room=room)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class Detail_Device(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, room_pk, device_pk):
try:
user = request.user
room = Room.objects.get(pk=room_pk, user=user)
device = Devices.objects.get(room=room, pk=device_pk)
except (Room.DoesNotExist, Devices.DoesNotExist):
return Response(
{"error": "Room does not exist ya sa7by"},
status=status.HTTP_404_NOT_FOUND,
)
serializer = IotDevicesSerializer(device, context={"request": request})
return Response(serializer.data, status=status.HTTP_302_FOUND)
def put(self, request, room_pk, device_pk):
try:
room = Room.objects.get(pk=room_pk, user=request.user)
device = Devices.objects.get(pk=device_pk, room=room)
except (Room.DoesNotExist, Devices.DoesNotExist):
return Response(
{"error": "Room does not exist ya 7oby"},
status=status.HTTP_404_NOT_FOUND,
)
serializer = IotDevicesSerializer(device, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, room_pk, device_pk):
try:
room = Room.objects.get(pk=room_pk, user=request.user)
device = Devices.objects.get(pk=device_pk, room=room)
except (Room.DoesNotExist, Devices.DoesNotExist):
return Response(
{"error": "Room does not exist ya sa7by"},
status=status.HTTP_404_NOT_FOUND,
)
device.delete()
return Response(
{"deleted successfuly": "deleted yas sa7by"},
status=status.HTTP_204_NO_CONTENT,
)
class Alexa_Esp_Model(viewsets.ModelViewSet):
    def list(self, request):
        global connected  # set True by on_connect; reset here so each request starts fresh
        connected = False
        def on_connect(client, userdata, flags, rc):
if rc == 0:
print("client is connected")
global connected
connected = True
else:
print("client is not connected failed")
connected = False
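        # Connect to the CloudMQTT broker, block until on_connect confirms the
        # session, then publish 0 to the 'switch1' topic for the ESP device.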
broker_address = "driver.cloudmqtt.com"
port = 18844
user = "oisliorl"
password = "zy_JTRVqFcg4"
clientID = "EspClient00002"
client = mqttclient.Client("MQTT")
client.username_pw_set(user, password=password)
client.on_connect = on_connect
client.connect(broker_address, port=port)
client.loop_start()
while connected != True:
time.sleep(0.2)
client.publish("switch1", payload=0)
client.loop_stop()
client.disconnect()
|
the-stack_0_23908 | #!/usr/bin/env python
"""
Abstract: This script returns LDA plots, with samples/dots sized by relative abundances
of input OTU(s).
Date: 07/18/2016
"""
import sys
import argparse
from os.path import join as pj
from phylotoast import biom_calc as bc, otu_calc as oc, graph_util as gu, util
errors = []
try:
import biom
except ImportError as ie:
errors.append(ie)
try:
import numpy as np
except ImportError as ie:
errors.append(ie)
try:
import pandas as pd
except ImportError as ie:
errors.append(ie)
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc("font", family="Arial") # define font for figure text
mpl.rc("xtick", labelsize=12) # increase X axis ticksize
mpl.rc("ytick", labelsize=12) # increase Y axis ticksize
except ImportError as ie:
errors.append(ie)
try:
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
except ImportError as ie:
errors.append(ie)
if len(errors) != 0:
for item in errors:
print("Import Error:", item)
sys.exit()
def run_LDA(df):
"""
Run LinearDiscriminantAnalysis on input dataframe (df) and return
transformed data, scalings and explained variance by discriminants.
"""
# Prep variables for sklearn LDA
X = df[range(1, df.shape[1])].values # input data matrix
y = df["Condition"].values # data categories list
# Calculate LDA
sklearn_lda = LDA()
X_lda_sklearn = sklearn_lda.fit_transform(X, y)
try:
exp_var = sklearn_lda.explained_variance_ratio_
except AttributeError as ae:
print("\n{}: explained variance cannot be computed.\nPlease check this GitHub PR:"
" https://github.com/scikit-learn/scikit-learn/pull/6027".format(ae))
return X_lda_sklearn, y, "NA"
return X_lda_sklearn, y, exp_var
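# Illustrative run_LDA call on a toy frame (the column names are assumptions;
# the function only expects a "Condition" column followed by numeric columns):
#
#   toy = pd.DataFrame({"Condition": ["A", "A", "B", "B"],
#                       "otu1": [0.1, 0.2, 0.8, 0.9],
#                       "otu2": [0.9, 0.8, 0.2, 0.1]})
#   X_lda, y, exp_var = run_LDA(toy)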
def handle_program_options():
"""Command line arguments."""
parser = argparse.ArgumentParser(description="Create an LDA bubble plot from either \
sample-grouped OTU abundance data or \
sample-wise distance matrix file.")
parser.add_argument("-i", "--otu_table", required=True,
help="Input biom file format OTU table. [REQUIRED]")
parser.add_argument("-m", "--map_fp", required=True,
help="Metadata mapping file. [REQUIRED]")
parser.add_argument("-g", "--group_by", required=True,
help="A column name in the mapping file containing categorical \
values that will be used to identify groups. Each sample \
ID must have a group entry. Default is no categories and \
all the data will be treated as a single group. [REQUIRED]")
parser.add_argument("-c", "--color_by", required=True,
help="A column name in the mapping file containing hexadecimal \
(#FF0000) color values that will be used to color the \
groups. Each sample ID must have a color entry. [REQUIRED]")
parser.add_argument("-ids", "--otu_ids_fp", required=True,
help="Path to a file containing one OTU ID per line. One plot \
will be created for each OTU. [REQUIRED]")
parser.add_argument("-dm", "--dist_matrix_file",
help="Input distance matrix file.")
parser.add_argument("--save_lda_input",
help="Save a CSV-format file of the transposed LDA-input table \
to the file specifed by this option.")
parser.add_argument("-od", "--output_dir", default=".",
help="The directory to save the LDA bubble plots to. By default, \
plots will be saved in current working directory.")
parser.add_argument("--scale_by", default=1000, type=float,
help="Species relative abundance is multiplied by this factor in \
order to make appropriate visible bubbles in the output \
plots. Default scaling is 1000.")
parser.add_argument("--figsize", default=[14, 8], type=int, nargs=2,
help="Specify the 'width height' in inches for LDA bubble plots."
"By default, figure size is 14x8 inches.")
parser.add_argument("-s", "--save_as", default="svg",
help="The type of image file for LDA plots. By default, plots \
will be saved in 'svg' format.")
parser.add_argument("--ggplot2_style", action="store_true",
help="Apply ggplot2 styling to the figure.")
parser.add_argument("-v", "--verbose", action="store_true",
help="Displays species name as each is being plotted and stored \
to disk.")
return parser.parse_args()
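# Example invocation (the script and file names here are hypothetical):
#
#   python LDA_bubble.py -i otu_table.biom -m map.txt -g Treatment \
#       -c Color -ids otu_ids.txt -od plots --scale_by 1000 -s png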
def main():
args = handle_program_options()
# Parse and read mapping file
try:
header, imap = util.parse_map_file(args.map_fp)
category_idx = header.index(args.group_by)
except IOError as ioe:
err_msg = "\nError in metadata mapping filepath (-m): {}\n"
sys.exit(err_msg.format(ioe))
# Obtain group colors
class_colors = util.color_mapping(imap, header, args.group_by, args.color_by)
# Get otus for LDA bubble plots
    try:
        with open(args.otu_ids_fp) as otu_file:
            bubble_otus = {line.strip() for line in otu_file if line.strip()}
    except IOError as ioe:
        err_msg = "\nError in OTU IDs file (-ids): {}\n"
        sys.exit(err_msg.format(ioe))
# Load biom file and calculate relative abundance
try:
biomf = biom.load_table(args.otu_table)
except IOError as ioe:
err_msg = "\nError with biom format file (-d): {}\n"
sys.exit(err_msg.format(ioe))
# Get normalized relative abundances
rel_abd = bc.relative_abundance(biomf)
rel_abd = bc.arcsine_sqrt_transform(rel_abd)
abd_val = {abd for sid, v1 in rel_abd.items() for otuid, abd in v1.items() if abd > 0}
bubble_range = np.linspace(min(abd_val), max(abd_val), num=5) * args.scale_by
    # Round the bubble legend sizes to the nearest 50
bubble_range = [int(50 * round(float(abd)/50)) for abd in bubble_range[1:]]
# Set up input for LDA calc and get LDA transformed data
if args.dist_matrix_file:
try:
uf_data = pd.read_csv(args.dist_matrix_file, sep="\t", index_col=0)
except IOError as ioe:
err_msg = "\nError with unifrac distance matrix file (-d): {}\n"
sys.exit(err_msg.format(ioe))
uf_data.insert(0, "Condition", [imap[sid][category_idx] for sid in uf_data.index])
sampleids = uf_data.index
if args.save_lda_input:
uf_data.to_csv(args.save_lda_input, sep="\t")
# Run LDA
X_lda, y_lda, exp_var = run_LDA(uf_data)
else:
df_rel_abd = pd.DataFrame(rel_abd).T
df_rel_abd.insert(0, "Condition", [imap[sid][category_idx]
for sid in df_rel_abd.index])
sampleids = df_rel_abd.index
if args.save_lda_input:
df_rel_abd.to_csv(args.save_lda_input, sep="\t")
# Run LDA
X_lda, y_lda, exp_var = run_LDA(df_rel_abd)
# Calculate position and size of SampleIDs to plot for each OTU
for otuid in bubble_otus:
otuname = oc.otu_name(biomf.metadata(otuid, axis="observation")["taxonomy"])
plot_data = {cat: {"x": [], "y": [], "size": [], "label": []}
for cat in class_colors.keys()}
for sid, data in zip(sampleids, X_lda):
category = plot_data[imap[sid][category_idx]]
try:
size = rel_abd[sid][otuid] * args.scale_by
except KeyError as ke:
print("{} not found in {} sample.".format(ke, sid))
continue
category["x"].append(float(data[0]))
category["y"].append(float(data[1]))
category["size"].append(size)
# Plot LDA bubble for each OTU
fig = plt.figure(figsize=args.figsize)
ax = fig.add_subplot(111)
for i, cat in enumerate(plot_data):
plt.scatter(plot_data[cat]["x"], plot_data[cat]["y"],
s=plot_data[cat]["size"], label=cat, color=class_colors[cat],
alpha=0.85, edgecolors="k")
if X_lda.shape[1] == 1:
plt.ylim((0.5, 2.5))
plt.title(" ".join(otuname.split("_")), style="italic", fontsize=13)
try:
plt.xlabel("LD1 (Percent Explained Variance: {:.3f}%)".format(exp_var[0]*100),
fontsize=13, labelpad=15)
        except (TypeError, IndexError):
plt.xlabel("LD1", fontsize=13, labelpad=15)
try:
plt.ylabel("LD2 (Percent Explained Variance: {:.3f}%)".format(exp_var[1]*100),
fontsize=13, labelpad=15)
        except (TypeError, IndexError):
plt.ylabel("LD2", fontsize=13, labelpad=15)
lgnd1 = plt.legend(loc="best", scatterpoints=3, fontsize=13)
for i in range(len(class_colors.keys())):
lgnd1.legendHandles[i]._sizes = [80] # Change the legend marker size manually
# Add the legend manually to the current plot
plt.gca().add_artist(lgnd1)
c = [plt.scatter([], [], c="w", edgecolors="k", s=s1) for s1 in bubble_range]
plt.legend(c, ["{}".format(s2) for s2 in bubble_range],
title="Scaled Bubble\n Sizes", frameon=True, labelspacing=2,
fontsize=13, loc=4, scatterpoints=1, borderpad=1.1)
# Set style for LDA bubble plots
if args.ggplot2_style:
gu.ggplot2_style(ax)
fc = "0.8"
else:
fc = "none"
# Save LDA bubble plots to output directory
if args.verbose:
print("Saving chart for {}".format(" ".join(otuname.split("_"))))
fig.savefig(pj(args.output_dir, "_".join(otuname.split())) + "." + args.save_as,
facecolor=fc, edgecolor="none", dpi=300,
bbox_inches="tight", pad_inches=0.2)
plt.close(fig)
if __name__ == "__main__":
sys.exit(main())
|
the-stack_0_23913 | from pyox.client import Client, ServiceError, response_data
from pyox.webhdfs import WebHDFS
from io import StringIO
from enum import auto,Enum
import sys
import types
import requests
import json
JOB_TRACKER = 'jobTracker'
NAMENODE = 'nameNode'
OOZIE_APP_PATH = 'oozie.wf.application.path'
_jsonType = 'application/json'
def property_value(workflow,properties,name):
value = workflow.properties.get(name) if workflow is not None else None
if value is None:
value = properties.get(name)
    return value
def write_property(xml,name,value):
xml.write('<property>\n')
xml.write('<name>')
xml.write(name)
xml.write('</name>\n')
xml.write('<value>')
if type(value)==bool:
if value:
xml.write('true')
else:
xml.write('false')
else:
xml.write(str(value))
xml.write('</value>\n')
xml.write('</property>\n')
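# For example, write_property(xml, 'nameNode', 'hdfs://sandbox') emits:
#
#   <property>
#   <name>nameNode</name>
#   <value>hdfs://sandbox</value>
#   </property>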
class XMLWriter:
def __init__(self,io):
self.io = io
self.open_elements = []
def _push(self,name):
self.open_elements.append((name,False,False))
def _pop(self):
return self.open_elements.pop(-1)
def _markchild(self,text=None):
if len(self.open_elements)==0:
return
current = self.open_elements[-1]
if not current[1]:
self.open_elements[-1] = (current[0],True,current[2] if text is None else text)
self.io.write('>')
    def escape_attr(value):
        # Escape '&' first so the '&' inside the '&quot;' entity is not re-escaped.
        return str(value).replace('&','&amp;').replace('"','&quot;')
def escape_text(value):
return str(value).replace('&','&').replace('<','<')
def empty(self,name,attrs={}):
self.start(name,attrs)
self.end()
return self
def start(self,name,attrs={}):
self._markchild()
for i in range(len(self.open_elements)-1):
self.io.write(' ')
self.io.write('<')
self.io.write(name)
for attr in attrs.items():
self.io.write(' ')
self.io.write(attr[0])
self.io.write('="')
self.io.write(XMLWriter.escape_attr(attr[1]))
self.io.write('"')
self._push(name)
return self
def end(self):
current = self._pop()
if current[1]:
if not current[2]:
for i in range(len(self.open_elements)-1):
self.io.write(' ')
self.io.write('</')
self.io.write(current[0])
self.io.write('>')
else:
self.io.write('/>')
return self
def text(self,value):
self._markchild(text=True)
self.io.write(XMLWriter.escape_text(value))
return self
def newline(self):
self._markchild()
self.io.write('\n')
return self
def named_child(self,name,value,all=True):
if value is not None:
if type(value)==list:
if all:
for item in value:
if hasattr(item,'to_xml'):
self.start(name)
item.to_xml(self)
self.end()
else:
self.start(name).text(str(item)).end()
self.newline()
else:
if hasattr(value[0],'to_xml'):
self.start(name)
value[0].to_xml(self)
self.end()
else:
self.start(name).text(str(value[0])).end()
self.newline()
else:
if hasattr(value,'to_xml'):
self.start(name)
value.to_xml(self)
self.end()
else:
self.start(name).text(value).end()
self.newline()
def child(self,value,all=True,container=None,wrapper=None,name_value=False):
if value is not None:
if type(value)==list:
if all:
for item in value:
if hasattr(item,'to_xml'):
item.to_xml(self)
else:
self.text(str(item))
self.newline()
else:
if hasattr(value[0],'to_xml'):
value[0].to_xml(self)
else:
self.text(str(value[0]))
self.newline()
elif type(value)==dict:
if container is not None:
self.start(container)
self.newline()
for name,value in value.items():
if wrapper is not None:
self.start(wrapper)
self.newline()
if name_value:
self.named_child('name',name)
self.named_child('value',value)
else:
self.named_child(name,value)
if wrapper is not None:
self.end()
self.newline()
if container is not None:
self.end()
self.newline()
else:
if hasattr(value,'to_xml'):
value.to_xml(self)
else:
self.text(str(value))
self.newline()
def finish(self):
while len(self.open_elements)>0:
self.end().newline()
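# Minimal XMLWriter sketch (uses only the methods defined above; illustrative):
#
#   io = StringIO()
#   xml = XMLWriter(io)
#   xml.start('workflow-app', {'name': 'demo'}).newline()
#   xml.start('start').text('first-node').end().newline()
#   xml.finish()
#   print(io.getvalue())  # <workflow-app name="demo"> ... </workflow-app>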
class WorkflowItem:
class Type(Enum):
START = auto()
END = auto()
SWITCH = auto()
FORK = auto()
JOIN = auto()
KILL = auto()
ACTION = auto()
def __init__(self,itemType,name,targets,**kwargs):
self.itemType = itemType
self.name = name
self.targets = targets
self.properties = kwargs
class InvalidWorkflow(ValueError):
def __init__(self,message,errors):
super().__init__(message)
self.errors = errors
class XMLSerializable:
def __str__(self):
io = StringIO()
self.to_xml(XMLWriter(io))
return io.getvalue()
def to_xml(self,xml):
pass
class Workflow(XMLSerializable):
def __init__(self,name,start,**kwargs):
self.name = name
self.start = start
self.items = {}
self.credentials = []
self.end('end')
self.last_action = None
self.properties = kwargs
def start(name,start,**kwargs):
w = Workflow(name,start,**kwargs)
return w
def end(self,name):
self.end = name
self.items[name] = WorkflowItem(WorkflowItem.Type.END,name,[])
return self
def action(self,name,action,credential=None,ok=None,error='error',retry=None):
        if self.start is None:
self.start = name
if name in self.items:
raise ValueError('A workflow item named {} has already been defined.'.format(name))
self.items[name] = WorkflowItem(WorkflowItem.Type.ACTION,name,[ok,error],action=action,credential=credential,retry=retry)
if self.last_action is not None:
self.last_action.targets[0] = name
self.last_action = self.items[name]
if action.workflow is None:
action.workflow = self
return self
def switch(self,name,*cases):
        if self.start is None:
self.start = name
if name in self.items:
raise ValueError('A workflow item named {} has already been defined.'.format(name))
if len(cases)==0:
raise ValueError('There must be at least one case.')
defaultCount = 0
targets = []
for case in cases:
if type(case)==str:
defaultCount += 1
targets.append(case)
elif type(case)==tuple:
if len(case)!=2:
raise ValueError('Too many tuple values for case: {}'.format(str(case)))
targets.append(case[0])
else:
raise ValueError('Incorrect case type: {}'.format(str(type(case))))
if defaultCount>1:
raise ValueError('More than one default provided on switch.')
self.items[name] = WorkflowItem(WorkflowItem.Type.SWITCH,name,targets,cases=cases)
return self
def fork(self,name,*starts):
        if self.start is None:
self.start = name
if name in self.items:
raise ValueError('A workflow item named {} has already been defined.'.format(name))
if len(starts)<2:
raise ValueError('A fork must have at least two pathes.')
self.items[name] = WorkflowItem(WorkflowItem.Type.FORK,name,starts)
return self
def join(self,name,to):
        if self.start is None:
self.start = name
if name in self.items:
raise ValueError('A workflow item named {} has already been defined.'.format(name))
self.items[name] = WorkflowItem(WorkflowItem.Type.FORK,name,[to])
return self
def kill(self,name,message):
        if self.start is None:
self.start = name
if name in self.items:
raise ValueError('A workflow item named {} has already been defined.'.format(name))
self.items[name] = WorkflowItem(WorkflowItem.Type.KILL,name,[],message=message)
return self
def credential(self,cred_name,cred_type,*properties):
action = XMLSerializable()
action.cred_name = cred_name
action.cred_type = cred_type
action.properties = properties
def to_xml(self,xml):
xml.start('credential',{'name':self.cred_name,'type':self.cred_type}).newline()
for item in self.properties:
if type(item)==dict:
for name in item:
value = item[name]
xml.start('property').newline()
xml.named_child('name',name)
xml.named_child('value',value)
xml.end().newline()
else:
xml.child(item)
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
self.credentials.append(action)
return self
def case(to,predicate):
return (to,predicate)
def default(to):
return to
def streaming(**kwargs):
action = XMLSerializable()
action.properties = kwargs
def to_xml(self,xml):
xml.start('streaming').newline()
xml.named_child('mapper',self.properties.get('mapper'),all=False)
xml.named_child('reducer',self.properties.get('reducer'),all=False)
xml.named_child('record-reader',self.properties.get('record_reader'),all=False)
xml.named_child('record-reader-mapping',self.properties.get('record_reader_mapping'))
xml.named_child('env',self.properties.get('env'))
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def pipes(**kwargs):
action = XMLSerializable()
action.properties = kwargs
def to_xml(self,xml):
xml.start('pipes').newline()
xml.named_child('map',self.properties.get('map'),all=False)
xml.named_child('reduce',self.properties.get('reduce'),all=False)
xml.named_child('inputformat',self.properties.get('inputformat'),all=False)
xml.named_child('partitioner',self.properties.get('partitioner'),all=False)
xml.named_child('writer',self.properties.get('writer'),all=False)
xml.named_child('program',self.properties.get('program'),all=False)
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def delete(path):
action = XMLSerializable()
action.path = path
def to_xml(self,xml):
xml.empty('delete',{'path':self.path}).newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def mkdir(path):
action = XMLSerializable()
action.path = path
def to_xml(self,xml):
xml.empty('mkdir',{'path':self.path}).newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def prepare(*items):
action = XMLSerializable()
action.items = items
def to_xml(self,xml):
xml.start('prepare').newline()
for item in self.items:
item.to_xml(xml)
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def property(name,value,description=None):
action = XMLSerializable()
action.name = name
action.value = value
if name is None:
raise ValueError('The property name can not be missing.')
if value is None:
raise ValueError('The property value can not be missing.')
action.description = description
def to_xml(self,xml):
xml.start('property').newline()
xml.named_child('name',self.name)
xml.named_child('value',self.value)
xml.named_child('description',self.description)
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def configuration(*items):
action = XMLSerializable()
action.items = items
def to_xml(self,xml):
xml.start('configuration').newline()
for item in self.items:
if type(item)==dict:
for name in item:
value = item[name]
xml.start('property').newline()
xml.named_child('name',name)
xml.named_child('value',value)
xml.end().newline()
else:
item.to_xml(item)
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def map_reduce(streaming_or_pipes,workflow=None,**kwargs):
action = XMLSerializable()
action.workflow = workflow
action.properties = kwargs
action.streaming_or_pipes = streaming_or_pipes
def to_xml(self,xml):
xml.start('map-reduce').newline()
xml.named_child('job-tracker',property_value(self.workflow,self.properties,'job_tracker'))
xml.named_child('name-node',property_value(self.workflow,self.properties,'name_node'))
xml.child(self.properties.get('prepare'))
xml.child(self.streaming_or_pipes)
xml.named_child('job-xml',self.properties.get('job_xml'))
xml.child(self.properties.get('configuration'),container='configuration',wrapper='property',name_value=True)
xml.named_child('file',self.properties.get('file'))
xml.named_child('archive',self.properties.get('archive'))
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def spark(name,jar,workflow=None,**kwargs):
action = XMLSerializable()
action.workflow = workflow
action.properties = kwargs
action.name = name
action.jar = jar
action.properties = kwargs
def to_xml(self,xml):
xml.start('spark',{'xmlns':'uri:oozie:spark-action:0.1'}).newline()
xml.named_child('job-tracker',property_value(self.workflow,self.properties,'job_tracker'))
xml.named_child('name-node',property_value(self.workflow,self.properties,'name_node'))
xml.child(self.properties.get('prepare'))
xml.named_child('job-xml',self.properties.get('job_xml'))
xml.child(self.properties.get('configuration'),container='configuration',wrapper='property',name_value=True)
xml.named_child('master',property_value(self.workflow,self.properties,'master'))
xml.named_child('mode',property_value(self.workflow,self.properties,'mode'))
xml.named_child('name',self.name)
xml.named_child('jar',self.jar)
xml.named_child('spark-opts',self.properties.get('spark_opts'))
xml.named_child('arg',self.properties.get('arg'))
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def pig(script,workflow=None,**kwargs):
action = XMLSerializable()
action.workflow = workflow
action.properties = kwargs
action.script = script
def to_xml(self,xml):
xml.start('pig').newline()
xml.named_child('job-tracker',property_value(self.workflow,self.properties,'job_tracker'))
xml.named_child('name-node',property_value(self.workflow,self.properties,'name_node'))
xml.child(self.properties.get('prepare'))
xml.named_child('job-xml',self.properties.get('job_xml'))
xml.child(self.properties.get('configuration'),container='configuration',wrapper='property',name_value=True)
xml.named_child('script',self.script)
xml.named_child('param',self.properties.get('param'))
xml.named_child('argument',self.properties.get('argument'))
xml.named_child('file',self.properties.get('file'))
xml.named_child('archive',self.properties.get('archive'))
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def hive(script,workflow=None,**kwargs):
action = XMLSerializable()
action.workflow = workflow
action.properties = kwargs
action.script = script
def to_xml(self,xml):
xml.start('hive',{'xmlns':'uri:oozie:hive-action:0.3'}).newline()
xml.named_child('job-tracker',property_value(self.workflow,self.properties,'job_tracker'))
xml.named_child('name-node',property_value(self.workflow,self.properties,'name_node'))
xml.child(self.properties.get('prepare'))
xml.named_child('job-xml',self.properties.get('job_xml'))
xml.child(self.properties.get('configuration'),container='configuration',wrapper='property',name_value=True)
xml.named_child('script',self.script)
xml.named_child('param',self.properties.get('param'))
xml.named_child('file',self.properties.get('file'))
xml.named_child('archive',self.properties.get('archive'))
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def hive2(jdbc_url,script,workflow=None,**kwargs):
action = XMLSerializable()
action.workflow = workflow
action.properties = kwargs
action.jdbc_url = jdbc_url
action.script = script
def to_xml(self,xml):
xml.start('hive2',{'xmlns':'uri:oozie:hive2-action:0.1'}).newline()
xml.named_child('job-tracker',property_value(self.workflow,self.properties,'job_tracker'))
xml.named_child('name-node',property_value(self.workflow,self.properties,'name_node'))
xml.child(self.properties.get('prepare'))
xml.named_child('job-xml',self.properties.get('job_xml'))
xml.child(self.properties.get('configuration'),container='configuration',wrapper='property',name_value=True)
xml.named_child('jdbc-url',self.jdbc_url)
xml.named_child('password',self.properties.get('password'))
xml.named_child('script',self.script)
xml.named_child('param',self.properties.get('param'))
xml.named_child('argument',self.properties.get('argument'))
xml.named_child('file',self.properties.get('file'))
xml.named_child('archive',self.properties.get('archive'))
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def ssh(host,command,*args,capture_output=False,workflow=None,**kwargs):
action = XMLSerializable()
action.workflow = workflow
action.properties = kwargs
action.host = host
action.command = command
action.args = args
action.capture_output = capture_output
def to_xml(self,xml):
xml.start('ssh').newline()
xml.named_child('host',self.host)
xml.named_child('command',self.command)
for arg in self.args:
xml.named_child('args',arg)
if self.capture_output:
xml.start('capture-output').end().newline()
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def shell(command,workflow=None,**kwargs):
action = XMLSerializable()
action.workflow = workflow
action.properties = kwargs
action.command = command
def to_xml(self,xml):
xml.start('shell',{'xmlns':'uri:oozie:shell-action:0.1'}).newline()
xml.named_child('job-tracker',property_value(self.workflow,self.properties,'job_tracker'))
xml.named_child('name-node',property_value(self.workflow,self.properties,'name_node'))
xml.child(self.properties.get('prepare'))
xml.named_child('job-xml',self.properties.get('job_xml'))
xml.child(self.properties.get('configuration'),container='configuration',wrapper='property',name_value=True)
xml.named_child('exec',self.command)
xml.named_child('argument',self.properties.get('argument'))
xml.named_child('file',self.properties.get('file'))
xml.named_child('archive',self.properties.get('archive'))
if self.properties.get('capture_output'):
xml.start('capture-output').end().newline()
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def sub_workflow(app_path,configuration=None,propagate_configuration=False,workflow=None,**kwargs):
action = XMLSerializable()
action.workflow = workflow
action.properties = kwargs
action.app_path = app_path
action.configuration = configuration
action.propagate_configuration = propagate_configuration
def to_xml(self,xml):
xml.start('sub-workflow').newline()
xml.named_child('app-path',self.app_path)
if self.propagate_configuration:
xml.start('propagate-configuration').end().newline()
xml.child(self.configuration)
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def fs(*operations):
action = XMLSerializable()
        action.operations = operations
def to_xml(self,xml):
xml.start('fs').newline()
            xml.child(self.operations)
xml.end()
action.to_xml = types.MethodType(to_xml,action)
return action
def java(main_class,workflow=None,**kwargs):
action = XMLSerializable()
action.workflow = workflow
action.properties = kwargs
action.main_class = main_class
def to_xml(self,xml):
xml.start('java').newline()
xml.named_child('job-tracker',property_value(self.workflow,self.properties,'job_tracker'))
xml.named_child('name-node',property_value(self.workflow,self.properties,'name_node'))
xml.child(self.properties.get('prepare'))
xml.named_child('job-xml',self.properties.get('job_xml'))
xml.child(self.properties.get('configuration'),container='configuration',wrapper='property',name_value=True)
xml.named_child('main-class',self.main_class)
xml.named_child('java-opts',self.properties.get('java_opts'))
xml.named_child('arg',self.properties.get('arg'))
xml.named_child('file',self.properties.get('file'))
xml.named_child('archive',self.properties.get('archive'))
if self.properties.get('capture_output'):
xml.start('capture-output').end().newline()
xml.end().newline()
action.to_xml = types.MethodType(to_xml,action)
return action
def check(self):
errors = []
if self.start is None:
errors.append('The start target has not been defined.')
if self.start is not None and self.items.get(self.start) is None:
errors.append('The start {} target has not been defined.'.format(str(self.start)))
if self.end is None and self.items.get('end') is None:
errors.append('The end target has not been defined.')
for item in self.items.values():
for name in item.targets:
sname = str(name)
if self.items.get(sname) is None:
errors.append('Item {} referenced undefined target {}'.format(str(item.name),sname))
if len(errors)>0:
raise InvalidWorkflow('The workflow {} is invalid.'.format(self.name),errors)
return self
def to_xml(self,xml):
xml.start('workflow-app',{'xmlns' : 'uri:oozie:workflow:0.5', 'name' : self.name}).newline()
if len(self.credentials)>0:
xml.start('credentials').newline()
xml.child(self.credentials)
xml.end().newline()
if self.start is not None:
xml.empty('start',{'to':self.start}).newline()
for item in self.items.values():
if item.itemType==WorkflowItem.Type.ACTION:
attrs = {'name':item.name}
credential = item.properties.get('credential')
if credential is not None:
attrs['cred'] = str(credential)
retry = item.properties.get('retry')
if retry is not None:
attrs['retry-max'] = retry[0]
attrs['retry-interval'] = retry[1]
xml.start('action',attrs).newline()
action = item.properties.get('action')
if action is not None:
action.to_xml(xml)
ok = item.targets[0]
if ok is None:
ok = self.end
xml.empty('ok',{'to':ok}).newline() \
.empty('error',{'to':item.targets[1]}).newline() \
.end().newline()
elif item.itemType==WorkflowItem.Type.SWITCH:
default = None
xml.start('decision',{'name':item.name}).newline()
xml.start('switch').newline()
for case in item.properties['cases']:
if type(case)==str:
default = case
else:
xml.start('case',{'to':case[0]})
xml.text(case[1])
xml.end().newline()
if default is not None:
xml.empty('default',{'to':default}).newline()
xml.end().newline()
xml.end().newline()
elif item.itemType==WorkflowItem.Type.FORK:
xml.start('fork',{'name':item.name}).newline()
for start in item.targets:
xml.empty('path',{'start':start}).newline()
xml.end().newline()
elif item.itemType==WorkflowItem.Type.JOIN:
xml.empty('join',{'name':item.name,'to':item.targets[0]}).newline()
elif item.itemType==WorkflowItem.Type.KILL:
xml.start('kill',{'name':item.name}).newline()
xml.named_child('message',item.properties.get('message'))
xml.end().newline()
if self.end is not None:
xml.empty('end',{'name':self.end}).newline()
xml.finish()
class Job:
def __init__(self,oozie,path,namenode='sandbox',verbose=False):
self.oozie = oozie
self.path = path
if self.path[0]!='/':
self.path = '/'+self.path
if self.path[-1]=='/':
self.path = self.path[0:-1]
self.namenode = namenode
self.hdfs = self.oozie.createHDFSClient()
self.verbose = verbose
self.progress = self.oozie.progress
if self.verbose:
sys.stderr.write('Creating workflow directory {} ...\n'.format(path))
if not self.hdfs.make_directory(path):
sys.stderr.write('Cannot create {}\n'.format(path))
return
def copy_resource(self,data,resource_path,overwrite=False):
return self.hdfs.copy(data,self.path + '/' + resource_path,overwrite=overwrite)
def define_workflow(self,data,overwrite=False):
if type(data)==Workflow:
data = StringIO(str(data))
return self.copy_resource(data,'workflow.xml',overwrite=overwrite)
def start(self,properties,verbose=False):
xml = StringIO()
xml.write('<?xml version="1.0" encoding="UTF-8"?>\n<configuration>\n')
if properties is not None:
for name in properties:
value = properties[name]
write_property(xml,name,value)
for name in self.oozie.properties:
if name not in properties:
                    write_property(xml,name,self.oozie.properties[name])
if properties is not None and OOZIE_APP_PATH not in properties:
write_property(xml,OOZIE_APP_PATH,'hdfs://{}{}/workflow.xml'.format(self.namenode,self.path))
if properties is not None and NAMENODE not in properties:
write_property(xml,NAMENODE,'hdfs://{}'.format(self.namenode))
xml.write('</configuration>\n')
if verbose or self.verbose:
sys.stderr.write(xml.getvalue())
sys.stderr.write('\n')
if verbose or self.verbose or self.progress:
sys.stderr.write('Requesting job start...\n')
return self.oozie.start(xml.getvalue())
class Oozie(Client):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.service = 'oozie'
self.properties = {}
self.defaultNamenode = kwargs.get('namenode')
if self.defaultNamenode is None:
self.defaultNamenode = 'sandbox'
tracker = kwargs.get('tracker')
if tracker is not None:
self.properties[JOB_TRACKER] = tracker
def createHDFSClient(self):
webhdfs = WebHDFS(base=self.base,secure=self.secure,host=self.host,port=self.port,gateway=self.gateway,username=self.username,password=self.password,cookies=self.cookies)
webhdfs.bearer_auth = self.bearer_auth
webhdfs.proxies = self.proxies
webhdfs.verify = self.verify
if self.verbose:
webhdfs.enable_verbose()
return webhdfs
def addProperty(self,name,value):
self.properties[name] = value
def removeProperty(self,name):
        return self.properties.pop(name,None)
def newJob(self,path,namenode=None,verbose=False):
return Job(self,path,namenode=namenode if namenode is not None else self.defaultNamenode,verbose=verbose)
def start(self,xml):
headers = {'Content-Type' : 'application/xml; charset=UTF-8'}
url = '{}/jobs'.format(self.service_url())
req = self.post(url,params={'action':'start'},data=xml,headers=headers)
#print(req.url)
if req.status_code==201:
msg = req.json()
#print(msg)
return msg['id']
else:
#print(req.text)
raise ServiceError(req.status_code,'Cannot start job.',request=req)
def status(self,jobid,show='info'):
url = '{}/job/{}'.format(self.service_url(version='v2'),jobid)
req = self.get(url,params={'show':show})
#print(req.url)
if req.status_code==200:
return response_data(req)
else:
raise ServiceError(req.status_code if req.status_code!=400 else 404,'Cannot get job information for {}'.format(jobid),request=req)
def list_jobs(self,status=None,offset=0,count=50):
url = '{}/jobs'.format(self.service_url(version='v2'))
params = {
'offset' : str(offset),
'len' : str(count)
}
if status is not None:
params['filter'] = 'status='+str(status)
req = self.get(url,params=params)
#print(req.url)
if req.status_code==200:
return response_data(req)
else:
raise ServiceError(req.status_code,'Cannot list jobs',request=req)
def submit(self,path,properties=None,workflow=None,copy=[],verbose=False,tracker=None):
job = self.newJob(path,verbose=verbose)
if workflow is not None:
if verbose or self.progress:
sys.stderr.write('Copying workflow.xml to {} ...\n'.format(path))
job.define_workflow(workflow,overwrite=True)
for info in copy:
if type(info)==tuple:
fpath = info[0]
dest = info[1]
else:
fpath = info
slash = fpath.rfind('/')
                dest = fpath[slash+1:] if slash>=0 else fpath
if type(fpath)==str:
with open(fpath,'rb') as data:
if verbose or self.progress:
if fpath==dest:
sys.stderr.write('→ {}\n'.format(dest))
else:
sys.stderr.write('{} → {}\n'.format(fpath,dest))
job.copy_resource(data,dest,overwrite=True)
else:
if verbose or self.progress:
sys.stderr.write('<data> → {}\n'.format(dest))
job.copy_resource(fpath,dest,overwrite=True)
jobid = job.start(properties,verbose=verbose)
if self.progress:
sys.stderr.write('{} job started.\n'.format(jobid))
if tracker is not None:
if verbose or self.progress:
sys.stderr.write('Requesting tracking of {}\n'.format(jobid))
if tracker[-1]=='/':
tracker = tracker[0:-1]
url = tracker + '/task/track/'
track_req = requests.post(
url,
auth=self.auth(),
data=json.dumps({'id' : jobid}),
headers={'Content-Type' : 'application/json; charset=UTF-8'},
                verify=self.verify)
if track_req.status_code!=200:
raise ServiceError(track_req.status_code,'Cannot track job {} via {}'.format(jobid,url),request=track_req)
if self.progress:
sys.stderr.write('{} is being tracked.\n'.format(jobid))
return jobid
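# Illustrative end-to-end submission (the host, credentials, and paths are
# assumptions, not defaults of this library):
#
#   client = Oozie(host='knox.example.com', username='me', password='secret',
#                  namenode='sandbox')
#   wf = Workflow('demo', 'run-shell').action(
#       'run-shell', Workflow.shell('echo', argument=['hello']))
#   jobid = client.submit('/user/me/demo', properties={'user.name': 'me'},
#                         workflow=wf, verbose=True)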
|
the-stack_0_23914 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional
import torch
from torch import Tensor
from torchmetrics.functional.classification.kl_divergence import _kld_compute, _kld_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
class KLDivergence(Metric):
r"""Computes the `KL divergence`_:
.. math::
        D_{KL}(P||Q) = \sum_{x\in\mathcal{X}} P(x) \log\frac{P(x)}{Q(x)}
Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
    is a non-symmetric metric, i.e., :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
Args:
p: data distribution with shape ``[N, d]``
q: prior or approximate distribution with shape ``[N, d]``
log_prob: bool indicating if input is log-probabilities or probabilities. If given as probabilities,
            will normalize to make sure the distributions sum to 1
reduction:
Determines how to reduce over the ``N``/batch dimension:
- ``'mean'`` [default]: Averages score across samples
- ``'sum'``: Sum score across samples
- ``'none'`` or ``None``: Returns score per sample
Raises:
TypeError:
If ``log_prob`` is not an ``bool``
ValueError:
If ``reduction`` is not one of ``'mean'``, ``'sum'``, ``'none'`` or ``None``
.. note::
Half precision is only support on GPU for this metric
Example:
>>> import torch
>>> from torchmetrics.functional import kl_divergence
>>> p = torch.tensor([[0.36, 0.48, 0.16]])
>>> q = torch.tensor([[1/3, 1/3, 1/3]])
>>> kl_divergence(p, q)
tensor(0.0853)
"""
is_differentiable = True
higher_is_better = False
    # TODO: cannot be used because of scripting
# measures: Union[List[Tensor], Tensor]
total: Tensor
def __init__(
self,
log_prob: bool = False,
reduction: Optional[str] = "mean",
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
) -> None:
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
if not isinstance(log_prob, bool):
raise TypeError(f"Expected argument `log_prob` to be bool but got {log_prob}")
self.log_prob = log_prob
allowed_reduction = ["mean", "sum", "none", None]
if reduction not in allowed_reduction:
raise ValueError(f"Expected argument `reduction` to be one of {allowed_reduction} but got {reduction}")
self.reduction = reduction
if self.reduction in ["mean", "sum"]:
self.add_state("measures", torch.tensor(0.0), dist_reduce_fx="sum")
else:
self.add_state("measures", [], dist_reduce_fx="cat")
self.add_state("total", torch.tensor(0), dist_reduce_fx="sum")
def update(self, p: Tensor, q: Tensor) -> None: # type: ignore
measures, total = _kld_update(p, q, self.log_prob)
if self.reduction is None or self.reduction == "none":
self.measures.append(measures)
else:
self.measures += measures.sum()
self.total += total
def compute(self) -> Tensor:
measures = dim_zero_cat(self.measures) if self.reduction is None or self.reduction == "none" else self.measures
return _kld_compute(measures, self.total, self.reduction)
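# Illustrative use of the modular class above (mirrors the functional example
# in the docstring):
#
#   metric = KLDivergence(log_prob=False, reduction="mean")
#   p = torch.tensor([[0.36, 0.48, 0.16]])
#   q = torch.tensor([[1/3, 1/3, 1/3]])
#   metric.update(p, q)
#   metric.compute()  # tensor(0.0853)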
|
the-stack_0_23915 | # -*- coding: utf-8 -*-
u"""Public functions from sirepo
Use this to call sirepo from other packages or Python notebooks.
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
from sirepo.template import lattice
from sirepo.template.lattice import LatticeUtil
import copy
import inspect
import py.error
import pykern.pkio
import sirepo.sim_data
import sirepo.util
class LibAdapterBase:
"""Common functionality between code specific LibAdapter implementations."""
def __init__(self):
m = inspect.getmodule(self)
self._sim_data, _, self._schema = sirepo.sim_data.template_globals(m.SIM_TYPE)
self._code_var = m.code_var
def _convert(self, data):
def _model(model, name):
s = self._schema.model[name]
k = x = v = None
try:
for k, x in s.items():
t = x[1]
v = model[k] if k in model else x[2]
if t == 'RPNValue':
t = 'Float'
if cv.is_var_value(v):
model[k] = cv.eval_var_with_assert(v)
continue
if t == 'Float':
model[k] = float(v) if v else 0.
elif t == 'Integer':
model[k] = int(v) if v else 0
except Exception as e:
pkdlog('model={} field={} decl={} value={} exception={}', name, k, x, v, e)
raise
cv = self._code_var(data.models.rpnVariables)
for x in data.models.rpnVariables:
x.value = cv.eval_var_with_assert(x.value)
for k, v in data.models.items():
if k in self._schema.model:
_model(v, k)
for x in ('elements', 'commands'):
for m in data.models[x]:
_model(m, LatticeUtil.model_name_for_data(m))
for bl in data.models.beamlines:
if 'positions' in bl:
for p in bl.positions:
p.elemedge = cv.eval_var_with_assert(p.elemedge)
return data
def _verify_files(self, path, filenames):
for f in filenames:
assert sirepo.util.secure_filename(f) == f, \
f'file={f} must be a simple name'
p = path.dirpath().join(f)
assert p.check(file=True), \
f'file={f} missing'
def _write_input_files(self, data, source_path, dest_dir):
for f in set(
LatticeUtil(data, self._schema).iterate_models(
lattice.InputFileIterator(self._sim_data, update_filenames=False),
).result,
):
f = self._sim_data.lib_file_name_without_type(f)
try:
dest_dir.join(f).mksymlinkto(source_path.new(basename=f), absolute=False)
except py.error.EEXIST:
pass
class GenerateBase:
"""Common functionality between code specific Generate implementations."""
@property
def util(self):
from sirepo.template.lattice import LatticeUtil
if not hasattr(self, '_util'):
self._util = LatticeUtil(self.data, self._schema)
return self._util
class Importer:
def __init__(self, sim_type):
import sirepo.template
self.__adapter = sirepo.template.import_module(sim_type).LibAdapter()
def parse_file(self, path):
p = pykern.pkio.py_path(path)
with pykern.pkio.save_chdir(p.dirpath()):
return SimData(
self.__adapter.parse_file(p),
p,
self.__adapter,
)
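# Illustrative usage (the sim type and file names are assumptions):
#
#   importer = Importer('elegant')
#   sim = importer.parse_file('lattice.lte')
#   sim.write_files('run_dir')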
class SimData(PKDict):
"""Represents data of simulation"""
def __init__(self, data, source, adapter):
super().__init__(data)
self.pkdel('report')
self.__source = source
self.__adapter = adapter
def copy(self):
"""Allows copy.deepcopy"""
return self.__class__(self, self.__source, self.__adapter)
def write_files(self, dest_dir):
"""Writes files for simulation state
Args:
dest_dir (str or py.path): where to write files
Returns:
PKDict: files written (debugging only)
"""
return self.__adapter.write_files(
# need to make a copy, b/c generate_parameters_file modifies
copy.deepcopy(self),
self.__source,
pykern.pkio.py_path(dest_dir),
)
|
the-stack_0_23916 | import torch
from utils import convert2cpu  # needed by save_conv/save_conv_bn for CUDA tensors
def parse_cfg(cfgfile):
blocks = []
fp = open(cfgfile, 'r')
block = None
line = fp.readline()
while line != '':
line = line.rstrip()
if line == '' or line[0] == '#':
line = fp.readline()
continue
elif line[0] == '[':
if block:
blocks.append(block)
block = dict()
block['type'] = line.lstrip('[').rstrip(']')
# set default value
if block['type'] == 'convolutional':
block['batch_normalize'] = 0
else:
key,value = line.split('=')
key = key.strip()
if key == 'type':
key = '_type'
value = value.strip()
block[key] = value
line = fp.readline()
if block:
blocks.append(block)
fp.close()
return blocks
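# parse_cfg keeps every value as a string (only the convolutional default
# batch_normalize is an int); a cfg such as
#
#   [net]
#   width=416
#   height=416
#
#   [convolutional]
#   filters=32
#   size=3
#   stride=1
#   pad=1
#
# becomes [{'type': 'net', 'width': '416', 'height': '416'},
#          {'type': 'convolutional', 'batch_normalize': 0, 'filters': '32', ...}]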
def print_cfg(blocks):
print('layer filters size input output');
prev_width = 416
prev_height = 416
prev_filters = 3
out_filters =[]
out_widths =[]
out_heights =[]
ind = -2
for block in blocks:
ind = ind + 1
if block['type'] == 'net':
prev_width = int(block['width'])
prev_height = int(block['height'])
continue
elif block['type'] == 'convolutional':
filters = int(block['filters'])
kernel_size = int(block['size'])
stride = int(block['stride'])
is_pad = int(block['pad'])
            pad = (kernel_size-1)//2 if is_pad else 0
            width = (prev_width + 2*pad - kernel_size)//stride + 1
            height = (prev_height + 2*pad - kernel_size)//stride + 1
print('%5d %-6s %4d %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'conv', filters, kernel_size, kernel_size, stride, prev_width, prev_height, prev_filters, width, height, filters))
prev_width = width
prev_height = height
prev_filters = filters
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
        elif block['type'] == 'maxpool':
            pool_size = int(block['size'])
            stride = int(block['stride'])
            width = prev_width//stride
            height = prev_height//stride
            # pooling keeps the channel count unchanged
            print('%5d %-6s %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'max', pool_size, pool_size, stride, prev_width, prev_height, prev_filters, width, height, prev_filters))
            prev_width = width
            prev_height = height
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'avgpool':
            width = 1
            height = 1
            # global average pooling keeps the channel count unchanged
            print('%5d %-6s %3d x %3d x%4d -> %3d' % (ind, 'avg', prev_width, prev_height, prev_filters, prev_filters))
            prev_width = width
            prev_height = height
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
elif block['type'] == 'softmax':
print('%5d %-6s -> %3d' % (ind, 'softmax', prev_filters))
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'cost':
print('%5d %-6s -> %3d' % (ind, 'cost', prev_filters))
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'reorg':
stride = int(block['stride'])
filters = stride * stride * prev_filters
            width = prev_width//stride
            height = prev_height//stride
print('%5d %-6s / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'reorg', stride, prev_width, prev_height, prev_filters, width, height, filters))
prev_width = width
prev_height = height
prev_filters = filters
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'route':
layers = block['layers'].split(',')
layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
if len(layers) == 1:
print('%5d %-6s %d' % (ind, 'route', layers[0]))
prev_width = out_widths[layers[0]]
prev_height = out_heights[layers[0]]
prev_filters = out_filters[layers[0]]
elif len(layers) == 2:
print('%5d %-6s %d %d' % (ind, 'route', layers[0], layers[1]))
prev_width = out_widths[layers[0]]
prev_height = out_heights[layers[0]]
assert(prev_width == out_widths[layers[1]])
assert(prev_height == out_heights[layers[1]])
prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'region':
print('%5d %-6s' % (ind, 'detection'))
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'shortcut':
from_id = int(block['from'])
from_id = from_id if from_id > 0 else from_id+ind
print('%5d %-6s %d' % (ind, 'shortcut', from_id))
prev_width = out_widths[from_id]
prev_height = out_heights[from_id]
prev_filters = out_filters[from_id]
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'connected':
filters = int(block['output'])
print('%5d %-6s %d -> %3d' % (ind, 'connected', prev_filters, filters))
prev_filters = filters
out_widths.append(1)
out_heights.append(1)
out_filters.append(prev_filters)
else:
print('unknown type %s' % (block['type']))
def load_conv(buf, start, conv_model):
num_w = conv_model.weight.numel()
num_b = conv_model.bias.numel()
conv_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w])); start = start + num_w
return start
def save_conv(fp, conv_model):
if conv_model.bias.is_cuda:
convert2cpu(conv_model.bias.data).numpy().tofile(fp)
convert2cpu(conv_model.weight.data).numpy().tofile(fp)
else:
conv_model.bias.data.numpy().tofile(fp)
conv_model.weight.data.numpy().tofile(fp)
def load_conv_bn(buf, start, conv_model, bn_model):
num_w = conv_model.weight.numel()
num_b = bn_model.bias.numel()
bn_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
bn_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
bn_model.running_mean.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
bn_model.running_var.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w])); start = start + num_w
return start
def save_conv_bn(fp, conv_model, bn_model):
if bn_model.bias.is_cuda:
convert2cpu(bn_model.bias.data).numpy().tofile(fp)
convert2cpu(bn_model.weight.data).numpy().tofile(fp)
convert2cpu(bn_model.running_mean).numpy().tofile(fp)
convert2cpu(bn_model.running_var).numpy().tofile(fp)
convert2cpu(conv_model.weight.data).numpy().tofile(fp)
else:
bn_model.bias.data.numpy().tofile(fp)
bn_model.weight.data.numpy().tofile(fp)
bn_model.running_mean.numpy().tofile(fp)
bn_model.running_var.numpy().tofile(fp)
conv_model.weight.data.numpy().tofile(fp)
def load_fc(buf, start, fc_model):
num_w = fc_model.weight.numel()
num_b = fc_model.bias.numel()
fc_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
fc_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w])); start = start + num_w
return start
def save_fc(fp, fc_model):
fc_model.bias.data.numpy().tofile(fp)
fc_model.weight.data.numpy().tofile(fp)
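# Note on the buffer layout assumed by the load_*/save_* helpers above:
# plain conv layers store bias then weights; conv+batchnorm layers store
# bn bias, bn weight, running_mean, running_var, then conv weights; fully
# connected layers store bias then weights.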
if __name__ == '__main__':
import sys
blocks = parse_cfg('cfg/yolo.cfg')
if len(sys.argv) == 2:
blocks = parse_cfg(sys.argv[1])
print_cfg(blocks)
|
the-stack_0_23917 | from pom_pages.recreations import RecreationsPage
recreation_name = "text=playwright-test"
def test_find_recreation_is_working(page):
recreation_page = RecreationsPage(page)
recreation_page.open()
recreation_page.select_recreation(recreation_name)
# verify hike name
assert page.inner_text(
"//*[@id='hikeTitle']") == "playwright-test"
|
the-stack_0_23918 | import os
import json
import pytest
from pytijo import parser
@pytest.fixture(scope="module")
def mock_struct(request):
return {
"tables": [
{
"id": "\[TABLE (\d{1,2})\]",
"flows": [
{
"id": "\[FLOW_ID(\d+)\]",
"timestamp": "Timestamp\s+=\s+(.+)",
"ofp_version": "ofp_version\s+=\s+(\d+)",
"controller_group": "ControllerGroup\s+=\s+(\d+)",
"controller_id": "ControllerId\s+=\s+(\d+)",
"priority": "Priority\s+=\s+(\d+)",
"idle_timeout": "Idle_timeout\s+=\s+(\d+)",
"hard_timeout": "Hard_timeout\s+=\s+(\d+)",
"packet_count": "Packet_count\s+=\s+(\d+)",
"byte_count": "Byte_count\s+=\s+(\d+)",
"cookie": "Cookie\s+=\s+([0-9a-fA-F]+)",
"send_flow_rem": "Send_flow_rem\s+=\s+(true|false)",
"match_fields": {
"block_start": "(\[MATCHFIELDS\])",
"block_end": "(\[INSTRUCTIONS\])",
"ether_type": "OFPXMT_OFB_ETH_TYPE\s+=\s+(.+)",
"in_port": "OFPXMT_OFB_IN_PORT\s+=\s+(.+)",
"mpls_label": "OFPXMT_OFB_MPLS_LABEL\s+=\s+(.+)",
},
"instructions": {
"block_start": "(\[INSTRUCTIONS\])",
"go_to_table": {
"block_start": "(\[OFPIT_GOTO_TABLE\])",
"table": "table\s+=\s+(\d+)",
},
"apply_actions": {
"block_start": "(\[OFPIT_APPLY_ACTIONS\])",
"output": {
"port": "port\s+=\s+(.+)",
"mlen": "mlen\s+=\s+(.+)",
},
"pop_mpls": {
"block_start": "(\[OFPAT_POP_MPLS\])",
"eth": "eth\s+=\s+(.+)",
},
"group": {
"block_start": "(\[OFPAT_GROUP\])",
"id": "id\s+=\s+(\d+)",
},
},
},
}
],
}
]
}
@pytest.fixture(scope="module")
def mock_group_struct(request):
    return {
        "groups": [
            {
                "id": r"Group id:\s+(\d+)",
                "ref_count": r"Reference count:\s+(\d+)",
                "packet_count": r"Packet count:\s+(\d+)",
                "byte_count": r"Byte count:\s+(\d+)",
                "bucket": [
                    {
                        "id": r"Bucket\s+(\d+)",
                        "packet_count": r"Packet count:\s+(\d+)",
                        "byte_count": r"Byte count:\s+(\d+)",
                    }
                ],
            }
        ]
    }
def read(filename):
output_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)
with open(output_file, "r") as fin:
return fin.read()
def test_simple_struct():
struct = {"message": r"(.*)"}
lines = ["Hello World"]
expected_output = {"message": "Hello World"}
parsed = parser.parse_struct(lines, struct)
assert parsed == expected_output
def test_simple_list():
struct = {"count": [r"(\d)"]}
lines = [
"The count says: 1",
"The count says: 2",
"The count says: 3",
"The count says: 4",
"The count says: 5",
]
expected_output = {"count": [1, 2, 3, 4, 5]}
parsed = parser.parse_struct(lines, struct)
# We need to convert to type after parsing
parsed = {"count": list(map(int, parsed["count"]))}
assert parsed == expected_output
def test_flows(mock_struct):
lines = read("./flow_output.txt").splitlines()
expected_output = json.loads(read("./flow_output_parsed.txt"))
parsed = parser.parse_struct(lines, mock_struct)
assert parsed == expected_output
def test_groups(mock_group_struct):
lines = read("./group_output.txt").splitlines()
expected_output = json.loads(read("./group_output_parsed.txt"))
parsed = parser.parse_struct(lines, mock_group_struct)
assert parsed == expected_output
def test_parse(mock_struct):
expected_output = json.loads(read("./flow_output_parsed.txt"))
parsed = parser.parse(read("./flow_output.txt"), mock_struct)
assert parsed == expected_output
|
the-stack_0_23923 | """
Test the lldb platform Python API.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class PlatformPythonTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@add_test_categories(['pyapi'])
@no_debug_info_test
def test_platform_list(self):
"""Test SBDebugger::GetNumPlatforms() & GetPlatformAtIndex() API"""
# Verify the host platform is present by default.
initial_num_platforms = self.dbg.GetNumPlatforms()
self.assertGreater(initial_num_platforms, 0)
host_platform = self.dbg.GetPlatformAtIndex(0)
self.assertTrue(host_platform.IsValid() and
host_platform.GetName() == 'host',
'The host platform is present')
# Select another platform and verify that the platform is added to
# the platform list.
platform_idx = self.dbg.GetNumAvailablePlatforms() - 1
if platform_idx < 1:
self.fail('No platforms other than host are available')
platform_data = self.dbg.GetAvailablePlatformInfoAtIndex(platform_idx)
platform_name = platform_data.GetValueForKey('name').GetStringValue(100)
self.assertNotEqual(platform_name, 'host')
self.dbg.SetCurrentPlatform(platform_name)
selected_platform = self.dbg.GetSelectedPlatform()
self.assertTrue(selected_platform.IsValid())
self.assertEqual(selected_platform.GetName(), platform_name)
self.assertEqual(self.dbg.GetNumPlatforms(), initial_num_platforms + 1)
platform_found = False
for platform_idx in range(self.dbg.GetNumPlatforms()):
platform = self.dbg.GetPlatformAtIndex(platform_idx)
if platform.GetName() == platform_name:
platform_found = True
break
self.assertTrue(platform_found)
@add_test_categories(['pyapi'])
@no_debug_info_test
def test_host_is_connected(self):
# We've already tested that this one IS the host platform.
host_platform = self.dbg.GetPlatformAtIndex(0)
self.assertTrue(host_platform.IsConnected(), "The host platform is always connected")
@add_test_categories(['pyapi'])
@no_debug_info_test
def test_available_platform_list(self):
"""Test SBDebugger::GetNumAvailablePlatforms() and GetAvailablePlatformInfoAtIndex() API"""
num_platforms = self.dbg.GetNumAvailablePlatforms()
self.assertGreater(
num_platforms, 0,
'There should be at least one platform available')
for i in range(num_platforms):
platform_data = self.dbg.GetAvailablePlatformInfoAtIndex(i)
name_data = platform_data.GetValueForKey('name')
desc_data = platform_data.GetValueForKey('description')
self.assertTrue(
name_data and name_data.IsValid(),
'Platform has a name')
self.assertEqual(
name_data.GetType(), lldb.eStructuredDataTypeString,
'Platform name is a string')
self.assertTrue(
desc_data and desc_data.IsValid(),
'Platform has a description')
self.assertEqual(
desc_data.GetType(), lldb.eStructuredDataTypeString,
'Platform description is a string')
@add_test_categories(['pyapi'])
@no_debug_info_test
def test_shell_interpreter(self):
""" Test a shell with a custom interpreter """
platform = self.dbg.GetSelectedPlatform()
self.assertTrue(platform.IsValid())
sh_cmd = lldb.SBPlatformShellCommand('/bin/zsh', 'echo $0')
self.assertIn('/bin/zsh', sh_cmd.GetShell())
self.assertIn('echo $0', sh_cmd.GetCommand())
self.build()
sh_cmd.SetShell(self.getBuildArtifact('a.out'))
err = platform.Run(sh_cmd)
self.assertSuccess(err)
self.assertIn("SUCCESS", sh_cmd.GetOutput())
|
the-stack_0_23925 | import streamlit as st
import requests, json, math, sys, os
import urllib.request
#######################
# Import all the things
#######################
# Import to allow image manipulation.
from PIL import Image, ImageFont, ImageDraw
from io import BytesIO
#################
# Debug Variables
#################
log_image_urls = False # Log the image URLs in the console
log_card_metadata = True # Log the metadata of the card, e.g.: ID, contract, name, etc...
#######################################
# Set up file path for folder and fonts
#######################################
# Gets the current folder location.
filepath = os.path.dirname(os.path.realpath(sys.argv[0]))
print('Local filepath is: ' + filepath)
# Declaring the font we use for the title.
print(f'{filepath}\\cardgeneration\\PoetsenOne-Regular.ttf')
card_font = ImageFont.truetype(f'{filepath}\\cardgeneration\\PoetsenOne-Regular.ttf', 64)
############################
# Setup the folder structure
############################
# Create image folder if it doesn't exist.
if not os.path.exists(filepath):
    os.mkdir(filepath)
if not os.path.exists(f'{filepath}\\collections'):
    os.mkdir(f'{filepath}\\collections')
#######################################
# Generate cards from downloaded images
#######################################
title_font = ImageFont.truetype(f'{filepath}\\cardgeneration\\PoetsenOne-Regular.ttf', 40)
description_font = ImageFont.truetype(f'{filepath}\\cardgeneration\\PoetsenOne-Regular.ttf', 40)
def CreateImageCard(collection_name, formatted_number, card_name, nft_input_location):
# Create variables for holding the images we want to merge.
nft_image = Image.open(f"{nft_input_location}")
background_image = Image.open(f"{filepath}\cardgeneration\card_background.png")
trim_image = Image.open(f"{filepath}\cardgeneration\card_trim.png")
# Convert all the images to RGBA
nft_image.convert("RGBA")
background_image.convert("RGBA")
trim_image.convert("RGBA")
# Grab the size of the background image and store as variables.
background_size = background_image.size
# Merge the images in the correct order.
final_image = Image.new('RGBA' , (background_size[0],background_size[1]) , (255,255,255))
final_image.paste(background_image,(0,0),background_image)
final_image.paste(nft_image,(30,140),nft_image)
final_image.paste(trim_image,(0,0),trim_image)
# Add the text title to the card.
title_text = card_name
final_image_editable = ImageDraw.Draw(final_image)
final_image_editable.text((60,40) , title_text , (50,50,50), font=title_font, align="center")
# Save the image to disk
    final_image.save(f"{filepath}\\collections\\{formatted_number}_card.png", format="png")
# Return the new image
return final_image
# Initialization complete
print("Card generator initialization complete.")
#######################
# Create User Interface
#######################
endpoint = st.sidebar.selectbox("Endpoints", ['Assets', 'Events', 'Rarity'])
st.header(f'Juno NFT Loot Inspector - {endpoint}')
st.sidebar.subheader("Filters")
collection = st.sidebar.text_input("Collection").lower()
owner = st.sidebar.text_input("Owner")
numberofassets = st.sidebar.number_input("Number of Assets", 0, 50, 1, 1)
offset = st.sidebar.number_input("Offset Token ID", 0, None, 0, 1)
order_direction = st.sidebar.selectbox("Order by",["asc","desc"])
if endpoint == 'Assets':
params = {}
if collection:
params['collection'] = collection
if owner:
params['owner'] = owner
if offset:
params['offset'] = int(offset)
if numberofassets:
params['limit'] = numberofassets
if order_direction:
params['order_direction'] = order_direction
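    # The assembled GET request (illustrative collection slug) ends up looking like:
    #   https://api.opensea.io/api/v1/assets?collection=doodles-official&limit=1&order_direction=asc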
r = requests.get("https://api.opensea.io/api/v1/assets", params=params)
response = r.json()
# Iterate through asset media and attempt to display.
for asset in response["assets"]:
# Debug prints to show asset values
if(log_card_metadata):
print("=============================")
print(f"{asset['collection']['name']} {asset['token_id']}")
print(f" - ID: {asset['id']}")
print(f" - Token ID: {asset['token_id']}")
print(f" - Contract Address: {asset['asset_contract']['address']}")
print("=============================")
# Generate a name for the asset
nft_name = f"{asset['collection']['name']} #{asset['token_id']}"
collection_name = f"{asset['collection']['name']}"
# TODO: Move all the images into their own collection folders again.
if not os.path.exists(f'{filepath}\collections\{collection_name}'):
os.mkdir(f'{filepath}\collections\{collection_name}')
# TODO: Rehook up the JSON exporter for the image_data folder.
if not os.path.exists(f'{filepath}\collections\{collection_name}\image_data'):
os.mkdir(f'{filepath}\collections\{collection_name}\image_data')
# if asset['name']:
# nft_name = asset['name']
# else:
# nft_name = f"{asset['collection']['name']} #{asset['token_id']}"
st.write(nft_name)
# Check to see the type of asset and use the correct player.
# TODO: Need a solution for SVGs and audio files.
if asset['image_url'].endswith('mp4'):
st.video(asset['image_url'])
else:
#st.image(asset['image_url'])
folder = os.path.dirname(os.path.realpath(__file__)) + "\collections"
img_data = requests.get(asset['image_url']).content
img_location = ''
# Check to see if we've already downloaded it.
            if os.path.exists(folder + "\\" + nft_name + ".jpg") or os.path.exists(folder + "\\" + nft_name + ".png"):
print(f" Data -> [\u2713] (Already Downloaded)")
# Test for jpg vs png files
if asset['image_url'].endswith('jpg'):
jpg_save_name = folder + "\\" + nft_name + ".jpg"
img_location = jpg_save_name
with open(jpg_save_name, 'wb') as handler:
handler.write(img_data)
# Try png files
else:
png_save_name = folder + "\\" + nft_name + ".png"
img_location = png_save_name
with open(png_save_name, 'wb') as handler:
handler.write(img_data)
# Resize the NFT to fit our requirements.
            interrim_image = Image.open(img_location)
            interrim_image = interrim_image.resize((633, 633))
            width, height = interrim_image.size
            print(width, height)
            interrim_image.save(img_location)
# Render as a playing card
final_image = CreateImageCard(asset['collection']['name'], asset['token_id'], nft_name, img_location)
# Render the image
st.image(final_image)
        # Log image urls if we've got that debug flag on.
if(log_image_urls):
print(asset['image_url'])
st.write(r.json()) |
the-stack_0_23926 | #!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# solver adapted to run minizinc
# https://github.com/discreteoptimization/setcover/blob/master/minizinc_001/solver.py
from minizinc.solver_minizinc import solve_it
def solve_it2(input_data):
# Modify this code to run your optimization algorithm
# parse the input
lines = input_data.split('\n')
first_line = lines[0].split()
node_count = int(first_line[0])
edge_count = int(first_line[1])
edges = []
for i in range(1, edge_count + 1):
line = lines[i]
parts = line.split()
edges.append((int(parts[0]), int(parts[1])))
# build a trivial solution
# every node has its own color
solution = range(0, node_count)
# prepare the solution in the specified output format
output_data = str(node_count) + ' ' + str(0) + '\n'
output_data += ' '.join(map(str, solution))
return output_data
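# Example of the expected input format (illustrative): a graph with 4 nodes
# and 3 edges -- first line "<node_count> <edge_count>", then one edge per line:
#
#   4 3
#   0 1
#   1 2
#   2 3
#
# For this input the trivial solver above returns "4 0" on the first line and
# "0 1 2 3" (one color per node) on the second.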
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
file_location = sys.argv[1].strip()
with open(file_location, 'r') as input_data_file:
input_data = input_data_file.read()
print(solve_it(input_data))
else:
print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/gc_4_1)')
|
the-stack_0_23927 | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from setuptools import find_packages, setup
MAIN_REQUIREMENTS = [
"airbyte-cdk",
]
TEST_REQUIREMENTS = [
"pytest~=6.1",
"responses~=0.13",
"source-acceptance-test",
]
setup(
name="source_us_census",
description="Source implementation for Us Census.",
author="Airbyte",
author_email="[email protected]",
packages=find_packages(),
install_requires=MAIN_REQUIREMENTS,
package_data={"": ["*.json", "schemas/*.json", "schemas/shared/*.json"]},
extras_require={
"tests": TEST_REQUIREMENTS,
},
)
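# Installation sketch: the test dependencies declared in ``extras_require``
# above can be pulled in with `pip install ".[tests]"` from this directory.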
|
the-stack_0_23929 | import cv2
import numpy as np
def RGB_mod_bright(img_obj, coeff = (1, 0)):
    # Apply a linear brightness transform a*x + b to every channel and clamp
    # the result to the valid [0, 255] range (vectorized equivalent of the
    # original element-wise trunc() over each channel).
    a = coeff[0]
    b = coeff[1]
    return np.clip(img_obj.astype(np.float64) * a + b, 0, 255)
def noisy(image, noise_typ):
if noise_typ == "gauss":
row,col,ch= image.shape
mean = 0
var = 0.1
sigma = var**0.5
gauss = np.random.normal(mean,sigma,(row,col,ch))
gauss = gauss.reshape(row,col,ch)
noisy = image + gauss
return noisy
elif noise_typ == "s&p":
row,col,ch = image.shape
s_vs_p = 0.5
amount = 0.004
out = np.copy(image)
# Salt mode
        num_salt = np.ceil(amount * image.size * s_vs_p)
        coords = tuple(np.random.randint(0, i - 1, int(num_salt)) for i in image.shape)
        out[coords] = 1
        # Pepper mode
        num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
        coords = tuple(np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape)
        out[coords] = 0
return out
elif noise_typ == "poisson":
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
noisy = np.random.poisson(image * vals) / float(vals)
return noisy
elif noise_typ =="speckle":
row,col,ch = image.shape
gauss = np.random.randn(row,col,ch)
gauss = gauss.reshape(row,col,ch)
noisy = image + image * gauss
return noisy
def RGB_mod_saturation(img_obj, saturation_mod):
    # Scale the saturation channel in HSV space, then convert back to RGB.
    img_obj = cv2.cvtColor(img_obj, cv2.COLOR_RGB2HSV).astype('float32')
    (h, s, v) = cv2.split(img_obj)
    s = s * saturation_mod
    s = np.clip(s, 0, 255)
    img_obj = cv2.merge([h, s, v])
    img_obj = cv2.cvtColor(img_obj.astype('uint8'), cv2.COLOR_HSV2RGB)
    return img_obj
def RGB_deep_fry(img_obj, bright_coeff = (1, 0), gaussian_blur = (7, 7), saturation_mod = 1, noise_type = 'gauss'):
    # img_obj : opencv-style RGB image (uint8)
    # bright_coeff : a = bright_coeff[0]; b = bright_coeff[1]; see RGB_mod_bright()
    # gaussian_blur : kernel size
    # saturation_mod : saturation multiplier
    # noise_type : gauss, poisson, s&p, speckle
    # Change brightness
    img_obj_ = RGB_mod_bright(img_obj, bright_coeff).astype('uint8')
    # Add blur (to the brightness-adjusted image, not the original input)
    img_obj_ = cv2.GaussianBlur(img_obj_, gaussian_blur, 0)
    # Add saturation
    img_obj_ = RGB_mod_saturation(img_obj_, saturation_mod)
    # Add noise on a [0, 1]-scaled copy, then rescale and clamp to [0, 255]
    img_obj_ = img_obj_.astype('float')
    img_obj_ = np.multiply(img_obj_, 1/255)
    img_obj_ = noisy(img_obj_, noise_type)
    img_obj_ = np.multiply(img_obj_, 255)
    img_obj_ = np.clip(img_obj_, 0, 255).astype('int')
    return img_obj_
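# Minimal usage sketch (assumes an image file named 'input.jpg' next to this
# script; the filename and parameter values are illustrative, not from the
# original code).
if __name__ == '__main__':
    img = cv2.imread('input.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; the helpers expect RGB
    fried = RGB_deep_fry(img, bright_coeff=(1.2, 10), gaussian_blur=(7, 7),
                         saturation_mod=1.5, noise_type='gauss')
    cv2.imwrite('output.jpg', cv2.cvtColor(fried.astype('uint8'), cv2.COLOR_RGB2BGR))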
|
the-stack_0_23930 | from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Domain(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.polar"
_path_str = "layout.polar.domain"
_valid_props = {"column", "row", "x", "y"}
# column
# ------
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this polar subplot .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
# row
# ---
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this polar subplot .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
# x
# -
@property
def x(self):
"""
Sets the horizontal domain of this polar subplot (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
Sets the vertical domain of this polar subplot (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this polar subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this polar subplot .
x
Sets the horizontal domain of this polar subplot (in
plot fraction).
y
Sets the vertical domain of this polar subplot (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`new_plotly.graph_objs.layout.polar.Domain`
column
If there is a layout grid, use the domain for this
column in the grid for this polar subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this polar subplot .
x
Sets the horizontal domain of this polar subplot (in
plot fraction).
y
Sets the vertical domain of this polar subplot (in plot
fraction).
Returns
-------
Domain
"""
super(Domain, self).__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.layout.polar.Domain
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.layout.polar.Domain`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("column", None)
_v = column if column is not None else _v
if _v is not None:
self["column"] = _v
_v = arg.pop("row", None)
_v = row if row is not None else _v
if _v is not None:
self["row"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
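# Usage sketch (illustrative): constrain a polar subplot to the left half of
# the figure by setting its horizontal domain, e.g.
#   Domain(x=[0, 0.5], y=[0, 1])
# The resulting object is what gets assigned to ``layout.polar.domain``.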
|
the-stack_0_23932 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.misc import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
    daub : ndarray
        The coefficients of the FIR low-pass filter, of length ``2*p``.
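    Examples
    --------
    The Daubechies-2 filter has ``2*p = 4`` coefficients and unit energy:
    >>> h = daub(2)
    >>> h.shape
    (4,)
    >>> np.allclose(np.sum(h**2), 1)
    True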
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
    hk : array_like
        Coefficients of the low-pass filter.
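    Examples
    --------
    Verify the alternating-sign relation ``g[k] = (-1)**k * h[N - k]`` for the
    Daubechies-2 filter:
    >>> h = daub(2)
    >>> g = qmf(h)
    >>> np.allclose(g, h[::-1] * np.array([1, -1, 1, -1]))
    True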
"""
N = len(hk) - 1
asgn = [{0: 1, 1:-1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
def wavedec(amn, hk):
    # Placeholder for a full wavelet decomposition; not yet implemented.
    gk = qmf(hk)
    return NotImplemented
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
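    Examples
    --------
    A minimal usage sketch: evaluate the Daubechies-3 scaling and wavelet
    functions on a dyadic grid and plot them.
    >>> import matplotlib.pyplot as plt
    >>> x, phi, psi = cascade(daub(3), J=7)
    >>> plt.plot(x, phi, x, psi)  # doctest: +SKIP
    >>> plt.show()                # doctest: +SKIP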
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
    x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {}
bitdic['0'] = v / sm
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float
Omega0. Default is 5
s : float
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of w.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
The complete version of the Morlet wavelet, with a correction
term to improve admissibility. For w greater than 5, the
correction term is negligible.
Note that the energy of the return wavelet is not normalised
according to s.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where r is the sampling rate.
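    Examples
    --------
    >>> wav = morlet(1000, w=5.0, s=1.0)
    >>> wav.shape
    (1000,)
    >>> wav.dtype
    dtype('complex128')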
"""
x = linspace(-s * 2 * pi, s * 2 * pi, M)
output = exp(1j * w * x)
if complete:
output -= exp(-0.5 * (w**2))
output *= exp(-0.5 * (x**2)) * pi**(-0.25)
return output
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A (1 - x^2/a^2) exp(-t^2/a^2)``,
where ``A = 2/sqrt(3a)pi^1/3``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
tsq = vec**2
mod = (1 - tsq / wsq)
gauss = np.exp(-tsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(width,length)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
        Will have shape of (len(widths), len(data)).
Notes
-----
>>> length = min(10 * width[ii], len(data))
>>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length,
... width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> sig = np.random.rand(20) - 0.5
>>> wavelet = signal.ricker
>>> widths = np.arange(1, 11)
>>> cwtmatr = signal.cwt(sig, wavelet, widths)
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
|
the-stack_0_23934 | #!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import os
import sys
from fabric.api import *
from fabric.contrib.files import exists
def configure_http_proxy_server(config):
try:
if not exists('/tmp/http_proxy_ensured'):
sudo('yum -y install squid')
template_file = config['template_file']
proxy_subnet = config['exploratory_subnet']
put(template_file, '/tmp/squid.conf')
sudo('\cp /tmp/squid.conf /etc/squid/squid.conf')
sudo('sed -i "s|PROXY_SUBNET|{}|g" /etc/squid/squid.conf'.format(proxy_subnet))
sudo('sed -i "s|EDGE_USER_NAME|{}|g" /etc/squid/squid.conf'.format(config['project_name']))
sudo('sed -i "s|LDAP_HOST|{}|g" /etc/squid/squid.conf'.format(config['ldap_host']))
sudo('sed -i "s|LDAP_DN|{}|g" /etc/squid/squid.conf'.format(config['ldap_dn']))
sudo('sed -i "s|LDAP_SERVICE_USERNAME|{}|g" /etc/squid/squid.conf'.format(config['ldap_user']))
sudo('sed -i "s|LDAP_SERVICE_PASSWORD|{}|g" /etc/squid/squid.conf'.format(config['ldap_password']))
sudo('sed -i "s|LDAP_AUTH_PATH|{}|g" /etc/squid/squid.conf'.format('/usr/lib64/squid/basic_ldap_auth'))
replace_string = ''
for cidr in config['vpc_cidrs']:
replace_string += 'acl AWS_VPC_CIDR dst {}\\n'.format(cidr)
sudo('sed -i "s|VPC_CIDRS|{}|g" /etc/squid/squid.conf'.format(replace_string))
replace_string = ''
for cidr in config['allowed_ip_cidr']:
replace_string += 'acl AllowedCIDRS src {}\\n'.format(cidr)
sudo('sed -i "s|ALLOWED_CIDRS|{}|g" /etc/squid/squid.conf'.format(replace_string))
sudo('systemctl restart squid')
sudo('chkconfig squid on')
sudo('touch /tmp/http_proxy_ensured')
except Exception as err:
print("Failed to install and configure squid: " + str(err))
sys.exit(1)
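# Example of the expected ``config`` dict for configure_http_proxy_server
# (illustrative values; the keys are taken from the substitutions above):
#
#   config = {
#       'template_file': '/root/templates/squid.conf',
#       'exploratory_subnet': '172.31.1.0/24',
#       'project_name': 'project1',
#       'ldap_host': 'ldap.example.com',
#       'ldap_dn': 'dc=example,dc=com',
#       'ldap_user': 'ldap-service',
#       'ldap_password': 'secret',
#       'vpc_cidrs': ['172.31.0.0/16'],
#       'allowed_ip_cidr': ['10.0.0.0/8'],
#   }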
def install_nginx_ldap(edge_ip, nginx_version, ldap_ip, ldap_dn, ldap_ou, ldap_service_pass, ldap_service_username):
try:
if not os.path.exists('/tmp/nginx_installed'):
sudo('yum install -y wget')
sudo('wget https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm')
try:
sudo('rpm -ivh epel-release-latest-7.noarch.rpm')
except:
print('Looks like EPEL is already installed.')
sudo(
'yum -y install gcc gcc-c++ make zlib-devel pcre-devel openssl-devel git openldap-devel')
sudo('mkdir -p /tmp/nginx_auth_ldap')
with cd('/tmp/nginx_auth_ldap'):
sudo('git clone https://github.com/kvspb/nginx-auth-ldap.git')
sudo('mkdir -p /tmp/src')
with cd('/tmp/src/'):
sudo('wget http://nginx.org/download/nginx-{}.tar.gz'.format(nginx_version))
sudo('tar -xzf nginx-{}.tar.gz'.format(nginx_version))
sudo('ln -sf nginx-{} nginx'.format(nginx_version))
with cd('/tmp/src/nginx/'):
sudo('./configure --user=nginx --group=nginx --prefix=/etc/nginx --sbin-path=/usr/sbin/nginx \
--conf-path=/etc/nginx/nginx.conf --pid-path=/run/nginx.pid --lock-path=/run/lock/subsys/nginx \
--error-log-path=/var/log/nginx/error.log --http-log-path=/var/log/nginx/access.log \
--with-http_gzip_static_module --with-http_stub_status_module --with-http_ssl_module --with-pcre \
--with-http_realip_module --with-file-aio --with-ipv6 --with-http_v2_module --with-debug \
--without-http_scgi_module --without-http_uwsgi_module --without-http_fastcgi_module --with-http_sub_module \
--add-module=/tmp/nginx_auth_ldap/nginx-auth-ldap/')
sudo('make')
sudo('make install')
sudo('useradd -r nginx')
sudo('rm -f /etc/nginx/nginx.conf')
sudo('mkdir -p /opt/dlab/templates')
put('/root/templates', '/opt/dlab', use_sudo=True)
sudo('sed -i \'s/LDAP_IP/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_ip))
sudo('sed -i \'s/LDAP_DN/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_dn))
sudo('sed -i \'s/LDAP_OU/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_ou))
sudo('sed -i \'s/LDAP_SERVICE_PASSWORD/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_service_pass))
sudo('sed -i \'s/LDAP_SERVICE_USERNAME/{}/g\' /opt/dlab/templates/nginx.conf'.format(ldap_service_username))
sudo('sed -i \'s/EDGE_IP/{}/g\' /opt/dlab/templates/conf.d/proxy.conf'.format(edge_ip))
sudo('cp /opt/dlab/templates/nginx.conf /etc/nginx/')
sudo('mkdir /etc/nginx/conf.d')
sudo('cp /opt/dlab/templates/conf.d/proxy.conf /etc/nginx/conf.d/')
sudo('mkdir /etc/nginx/locations')
sudo('cp /opt/dlab/templates/nginx_redhat /etc/init.d/nginx')
sudo('chmod +x /etc/init.d/nginx')
sudo('chkconfig --add nginx')
sudo('chkconfig --level 345 nginx on')
sudo('setsebool -P httpd_can_network_connect 1')
sudo('service nginx start')
sudo('touch /tmp/nginx_installed')
except Exception as err:
        print("Failed to install nginx with LDAP: " + str(err))
sys.exit(1)
|
the-stack_0_23935 | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from electrum.bitcoin import TYPE_ADDRESS
from electrum.storage import WalletStorage
from electrum.wallet import Wallet, InternalAddressCorruption
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword, send_exception_to_crash_reporter
from electrum.plugin import run_hook
from electrum.util import format_satoshis, format_satoshis_plain, format_fee_satoshis
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
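    # Worked example (illustrative): with base_unit 'mBTC' (decimal point 5)
    # and an exchange rate of 10000, btc_to_fiat('2') converts 2 mBTC to
    # 200000 satoshis, then 200000 * 10000 / 1e8 = 20.0, returned as '20'.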
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Tracks the screen orientation of the device the app is running on.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage):
if storage:
wallet = Wallet(storage)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
storage = WalletStorage(path, manual_upgrades=True)
if not storage.file_exists():
wizard.run('new')
elif storage.is_encrypted():
raise Exception("Kivy GUI does not support encrypted wallet files.")
elif storage.requires_upgrade():
wizard.upgrade_storage(storage)
else:
raise Exception("unexpected storage file situation")
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the
        basic tasks of setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import TxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
the-stack_0_23938 | # dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 Simon Dirmeier
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = 'Simon Dirmeier'
# @email = '[email protected]'
from enum import Enum
import dataframe
from ._piping_exception import PipingException
class PipingMethod(Enum):
GROUP = 0
MODIFY = 1
AGGREGATE = 2
SUBSET = 3
class Pipeable:
"""
Class that allows piping of methods.
"""
def __init__(self, piping_method, *args):
"""
Constructor for chainable. Takes a tuple which is either a dataframe
and column names to group by or only the column names
:param args: tuple of params
"""
self.__piping_method = piping_method
if args and isinstance(args[0], dataframe.DataFrame):
raise PipingException("Wrong instantiation.")
elif not args:
raise ValueError("No arguments provided.")
else:
self.__args = args
def __rrshift__(self, other):
if self.__piping_method == PipingMethod.GROUP:
return other.group(*self.__args)
if self.__piping_method == PipingMethod.AGGREGATE:
return other.aggregate(self.__args[0],
self.__args[1],
*self.__args[2:])
if self.__piping_method == PipingMethod.SUBSET:
return other.subset(*self.__args)
if self.__piping_method == PipingMethod.MODIFY:
return other.modify(self.__args[0],
self.__args[1],
*self.__args[2:])
raise PipingException("Error when executing pipe.")
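# Usage sketch (names are illustrative; this assumes the dataframe package
# exposes module-level wrappers such as group/aggregate that construct
# Pipeable objects):
#
#   frame >> group("gene") >> aggregate(Mean, "mean_expr", "expr")
#
# The right-shift triggers Pipeable.__rrshift__ above, which dispatches to
# the matching DataFrame method based on the stored PipingMethod.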
|
the-stack_0_23942 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or POPCoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19347 if testnet else 9347
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        fee = total_in - total_out
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
the-stack_0_23943 | #(c) 2013-2016 by Authors
#This file is a part of ABruijn program.
#Released under the BSD license (see LICENSE file)
"""
This module provides some basic FASTA I/O
"""
import logging
from string import maketrans
logger = logging.getLogger()
class FastaError(Exception):
pass
def read_fasta_dict(filename):
"""
    Reads a fasta file into a dictionary. Also performs some validation
"""
#logger.debug("Reading contigs file")
header = None
seq = []
fasta_dict = {}
try:
with open(filename, "r") as f:
for lineno, line in enumerate(f):
line = line.strip()
if line.startswith(">"):
if header:
fasta_dict[header] = "".join(seq)
seq = []
header = line[1:].split(" ")[0]
else:
if not _validate_seq(line):
raise FastaError("Invalid char in \"{0}\" at line {1}"
.format(filename, lineno))
seq.append(line)
if header and len(seq):
fasta_dict[header] = "".join(seq)
except IOError as e:
raise FastaError(e)
return fasta_dict
def write_fasta_dict(fasta_dict, filename):
"""
Writes dictionary with fasta to file
"""
with open(filename, "w") as f:
for header in sorted(fasta_dict):
f.write(">{0}\n".format(header))
for i in range(0, len(fasta_dict[header]), 60):
f.write(fasta_dict[header][i:i + 60] + "\n")
COMPL = maketrans("ATGCURYKMSWBVDHNXatgcurykmswbvdhnx",
"TACGAYRMKSWVBHDNXtacgayrmkswvbhdnx")
def reverse_complement(string):
return string[::-1].translate(COMPL)
def _validate_seq(sequence):
VALID_CHARS = "ACGTURYKMSWBDHVNXatgcurykmswbvdhnx"
if len(sequence.translate(None, VALID_CHARS)):
return False
return True
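# Usage sketch (file names are hypothetical):
#   seqs = read_fasta_dict("reads.fasta")
#   rc = {hdr: reverse_complement(seq) for hdr, seq in seqs.items()}
#   write_fasta_dict(rc, "reads_rc.fasta")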
|
the-stack_0_23944 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Sinkhorn implementation for 1D Optimal Transport.
Sinkhorn algorithm was introduced in 1967 by R. Sinkhorn in the article
"Diagonal equivalence to matrices with prescribed row and column sums." in
The American Mathematical Monthly. It is an iterative algorithm that turns an
input matrix into a bi-stochastic, alternating between normalizing rows and
columns.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow.compat.v2 as tf
@gin.configurable
class Sinkhorn1D(object):
"""Runs the Sinkhorn algorithm for 1D inputs.
This class implements the stabilized Sinkhorn algorithm in log domain with
epsilon decay to speed up convergence.
Attributes:
cost: Tensor<float>[batch_size, n, m] the cost matrix of optimal transport.
eps: (float) the current level of regularization. This changes over time due
to the epsilon decay scheme.
epsilon: (float) the level of entropic regularization wanted.
epsilon_0: (float) the initial level of entropic regularization.
epsilon_decay: (float) a multiplicative factor applied at each iteration
until reaching the epsilon value.
inner_num_iter: (int32) the Sinkhorn error is not computed at each iteration
but every inner_num_iter instead to avoid computational overhead.
iterations: (int32) the actual number of applied iterations.
max_iterations: (int32) the maximum number of Sinkhorn iterations.
power: (float) power of the p-norm used in the cost matrix.
threshold: (float) the relative threshold on the Sinkhorn error to stop the
Sinkhorn iterations.
"""
def __init__(
self, epsilon=1e-3, epsilon_0=1e-1, epsilon_decay=0.95, power=2.0,
threshold=1e-2, inner_num_iter=5, max_iterations=2000):
self.epsilon = epsilon
self.epsilon_0 = epsilon_0
self.epsilon_decay = epsilon_decay
self.power = power
self.threshold = threshold
self.inner_num_iter = inner_num_iter
self.max_iterations = max_iterations
self._max_outer_iterations = max_iterations // inner_num_iter
def center(self, f, g):
"""Centers the cost matrix relatively to dual variables f and g."""
return self.cost - f[:, :, tf.newaxis] - g[:, tf.newaxis, :]
def softmin(self, f, g, eps, axis):
return -eps * tf.reduce_logsumexp(-self.center(f, g) / eps, axis=axis)
def error(self, f, g, eps, b):
"""Computes the maximum relative sinkhorn error over the batch."""
b_target = tf.math.reduce_sum(
tf.math.exp(-self.center(f, g) / eps), axis=1)
return tf.reduce_max(tf.abs(b_target - b) / b, axis=None)
def __call__(self, x, y, a, b):
"""Runs the Sinkhorn algorithm on input (x, a) and target (y, b).
Args:
x: Tensor<float>[batch, n]: the input point clouds.
y: Tensor<float>[batch, m]: the target point clouds.
a: Tensor<float>[batch, n]: the weight of each input point.
b: Tensor<float>[batch, m]: the weight of each target point.
Returns:
A Tensor<float>[batch, n, m] transport map. As a side effect, it also
stores the cost matrix, the number of applied iterations and the obtained
level of entropic regularization.
"""
self._b = b
loga = tf.math.log(a)
logb = tf.math.log(b)
self.cost = tf.pow(
tf.math.abs(x[:, :, tf.newaxis] - y[:, tf.newaxis, :]), self.power)
def body_fn(f, g, eps, num_iter):
"""A small loop of N Sinkhorn iterations."""
for _ in range(self.inner_num_iter):
g = eps * logb + self.softmin(f, g, eps, axis=1) + g
f = eps * loga + self.softmin(f, g, eps, axis=2) + f
eps = tf.math.maximum(eps * self.epsilon_decay, self.epsilon)
return [f, g, eps, num_iter + self.inner_num_iter]
def cond_fn(f, g, eps, num_iter):
return tf.math.reduce_all([
tf.math.less(num_iter, self.max_iterations),
tf.math.reduce_any([
tf.math.greater(eps, self.epsilon),
tf.math.greater(self.error(f, g, eps, b), self.threshold)
])
])
self._f, self._g, self.eps, self.iterations = tf.while_loop(
cond_fn, body_fn, [
tf.zeros(tf.shape(loga), dtype=x.dtype),
tf.zeros(tf.shape(logb), dtype=x.dtype),
tf.cast(self.epsilon_0, dtype=x.dtype),
tf.constant(0, dtype=tf.int32)
],
parallel_iterations=1,
maximum_iterations=self._max_outer_iterations + 1)
return tf.math.exp(-self.center(self._f, self._g) / self.eps)
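# Usage sketch (shapes follow the __call__ docstring; values are made up):
#   sink = Sinkhorn1D(epsilon=1e-3)
#   x = tf.constant([[0.1, 0.5, 0.9]])  # [batch=1, n=3] input points
#   y = tf.constant([[0.2, 0.4, 0.8]])  # [batch=1, m=3] target points
#   a = tf.ones((1, 3)) / 3.0           # uniform input weights
#   b = tf.ones((1, 3)) / 3.0           # uniform target weights
#   plan = sink(x, y, a, b)             # [1, 3, 3] transport plan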
|
the-stack_0_23946 | #!/usr/bin/env python3
from gpiozero import LED
from time import sleep
led = LED(25)
count = 25
while count > 0:
led.on()
sleep(0.2)
led.off()
sleep(0.8)
    count -= 1
|
the-stack_0_23947 | # IMPORTS
import copy
from flask import Blueprint, render_template, request, flash
from app import db, requires_roles
from models import User, Draw, decrypt
from flask_login import login_required, current_user
# CONFIG
admin_blueprint = Blueprint('admin', __name__, template_folder='templates')
# VIEWS
# view admin homepage
@admin_blueprint.route('/admin')
@login_required
@requires_roles('admin')
def admin():
return render_template('admin.html', name=current_user.firstname)
# view all registered users
@admin_blueprint.route('/view_all_users', methods=['POST'])
@login_required
@requires_roles('admin')
def view_all_users():
return render_template('admin.html', name=current_user.firstname,
current_users=User.query.filter_by(role='user').all())
# create a new winning draw
@admin_blueprint.route('/create_winning_draw', methods=['POST'])
@login_required
@requires_roles('admin')
def create_winning_draw():
# get current winning draw
current_winning_draw = Draw.query.filter_by(win=True).first()
round = 1
# if a current winning draw exists
if current_winning_draw:
# update lottery round by 1
round = current_winning_draw.round + 1
# delete current winning draw
db.session.delete(current_winning_draw)
db.session.commit()
# get new winning draw entered in form
submitted_draw = ''
for i in range(6):
submitted_draw += request.form.get('no' + str(i + 1)) + ' '
# remove any surrounding whitespace
    submitted_draw = submitted_draw.strip()
# create a new draw object with the form data and the user id and draw key of the current user.
new_winning_draw = Draw(user_id=current_user.id, draw=submitted_draw, win=True, round=round,
draw_key=current_user.draw_key)
# add the new winning draw to the database
db.session.add(new_winning_draw)
db.session.commit()
# re-render admin page
flash("New winning draw added.")
return admin()
# view current winning draw
@admin_blueprint.route('/view_winning_draw', methods=['POST'])
@login_required
@requires_roles('admin')
def view_winning_draw():
    # get winning draw from DB
    current_winning_draw = Draw.query.filter_by(win=True).first()
    # if a winning draw exists
    if current_winning_draw:
        # decrypt it and re-render admin page with current winning draw and lottery round
        current_winning_draw.view_draw(current_user.draw_key)
        return render_template('admin.html', winning_draw=current_winning_draw, name=current_user.firstname)
# if no winning draw exists, rerender admin page and return error message
flash("No winning draw exists. Please add winning draw.")
return admin()
# view lottery results and winners
@admin_blueprint.route('/run_lottery', methods=['POST'])
@login_required
@requires_roles('admin')
def run_lottery():
# get current unplayed winning draw and make a copy for decryption
current_winning_draw = Draw.query.filter_by(win=True, played=False).first()
current_winning_draw_copy = copy.deepcopy(current_winning_draw)
# if current unplayed winning draw exists
if current_winning_draw:
# get all unplayed user draws
user_draws = Draw.query.filter_by(win=False, played=False).all()
# copy the list for decryption which wont affect database
draw_copies = list(map(lambda x: copy.deepcopy(x), user_draws))
results = []
# decrypt all draws in the draw_copy list
for f in draw_copies:
user = User.query.filter_by(id=f.user_id).first()
decrypt(f.draw, user.draw_key)
# if at least one unplayed user draw exists
if user_draws:
# update current winning draw as played
current_winning_draw.played = True
db.session.add(current_winning_draw)
db.session.commit()
# for each unplayed user draw
for draw in user_draws:
# get the owning user (instance/object)
user = User.query.filter_by(id=draw.user_id).first()
# if user draw matches current unplayed winning draw, check with the decrypted copies of the draws
# When trying to use draw.view_draw(user.draw_key) getting a memory error
# so using decrypt function instead
if decrypt(draw.draw, user.draw_key) == decrypt(current_winning_draw_copy.draw, current_user.draw_key):
# add details of winner to list of results
results.append((current_winning_draw_copy.round, decrypt(draw.draw, user.draw_key),
draw.user_id, user.email))
# update draw as a winning draw (this will be used to highlight winning draws in the user's
# lottery page)
draw.match = True
# update draw as played
draw.played = True
# update draw with current lottery round
draw.round = current_winning_draw.round
# commit draw changes to DB
db.session.add(draw)
db.session.commit()
# if no winners
if len(results) == 0:
flash("No winners.")
return render_template('admin.html', results=results, name=current_user.firstname)
flash("No user draws entered.")
return admin()
# if current unplayed winning draw does not exist
flash("Current winning draw expired. Add new winning draw for next round.")
return admin()
# view last 10 log entries
@admin_blueprint.route('/logs', methods=['POST'])
@login_required
@requires_roles('admin')
def logs():
# opening the file and spliting the last 10 lines in reverse order so last shows first
with open('lottery.log', 'r') as f:
content = f.read().splitlines()[-10:]
content.reverse()
return render_template('admin.html', logs=content, name=current_user.firstname)
|
the-stack_0_23948 | """ Plot the spectral sequence """
import matplotlib.pyplot as plt
plt.rc("font", family="serif")
plt.rc("text", usetex=True)
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import numpy as np
from astropy.table import Table
from astropy.cosmology import Planck15
from astropy.time import Time
import glob
import sys
sys.path.append("/Users/annaho/Github/Spectra")
sys.path.append("/Users/annaho/Dropbox/Projects/Research/ZTF18abukavn/code")
from load_lc import get_lc
from normalize import smooth_spec
from measure_snr import get_snr
def get_files():
files = np.array(glob.glob(
"/Users/annaho/Dropbox/Projects/Research/ZTF18abukavn/data/spec/ZTF18abukavn/*.ascii"))
dt = np.zeros(len(files))
tels = []
cols = np.array([""]*len(dt), dtype='U10')
# Read in all of the files, pull out the corresponding dates, and sort by date
t0 = 2458370.6634 # in JD
for ii,f in enumerate(files):
tel = f.split("_")[2]
tels.append(tel)
alldat = open(f).readlines()
if tel == 'LT':
for line in alldat:
if 'DATE-OBS' in line:
obsdate = line[13:36]
t = Time(obsdate, format='isot').jd
dt[ii] = t-t0
cols[ii] = 'magenta'
elif tel == 'P200':
for line in alldat:
if 'UT shutter open' in line:
obsdate = line[12:35]
t = Time(obsdate, format='isot').jd
dt[ii] = t-t0
cols[ii] = 'lightblue'
elif tel == 'Keck1':
for line in alldat:
if 'DATE_BEG' in line:
obsdate = line[13:32]
t = Time(obsdate, format='isot').jd
dt[ii] = t-t0
cols[ii] = 'red'
elif tel == 'DCT':
obsdate = '2018-09-14T00:00:00' # temporary
t = Time(obsdate, format='isot').jd
dt[ii] = t-t0
cols[ii] = 'yellow'
elif tel == 'NOT':
obsdate = '2018-09-17T00:00:00' # temporary
t = Time(obsdate, format='isot').jd
dt[ii] = t-t0
cols[ii] = 'green'
elif tel == 'P60':
for line in alldat:
if 'MJD_OBS' in line:
obsdate = float(line[11:25])
t = Time(obsdate, format='mjd').jd
dt[ii] = t-t0
cols[ii] = 'black'
elif tel == 'omr.ascii':
# first Xinglong spectrum
t = Time('2018-09-21T11:15:10.0').jd
dt[ii] = t-t0
elif tel == 'Bfosc.ascii':
# second Xinglong spectrum
t = Time('2018-09-25T11:16:43.0').jd
dt[ii] = t-t0
else:
print("couldn't find telescope")
print(tel)
order = np.argsort(dt)
files_sorted = files[order]
dt_sorted = dt[order]
tel_sorted = np.array(tels)[order]
cols = cols[order]
return files_sorted, dt_sorted, tel_sorted
def get_res(tel):
""" Here, this means the width of a line in Angstroms """
if tel == 'LT':
res = 18 # Angstrom, res at central wavelength
res = 30 # add a couple of Ang?
elif tel == 'P200':
res = 10 # determined by eye from the spectrum
# basically, width of a galaxy emission line is 10 AA
# and each pixel is 1 AA
elif tel == 'Keck1':
res = 7*2 # determined by eye from spectrum
# width of a line is around 7 pixels
# and each pixel is 2 Angstroms
elif tel == 'NOT':
# width of a line is around 8 pixels
# and each pixel is around 2.63 Ang
res = 8*2.63
elif tel == 'DCT':
# width of a line is around 7 pixels
# and each pixel is 2.2 Ang
res = 7*2.2
elif tel == 'P60':
res = 20
elif 'ascii' in tel:
# Xinglong spectrum
res = 26
else:
res = 1
return res
def load_spec(f, tel):
""" load data from spec file """
dat = np.loadtxt(f)
wl = dat[:,0]
flux = dat[:,1]
    if tel == 'Keck1':
eflux = dat[:,3]
else:
# need to estimate uncertainty from scatter
eflux = np.array([get_snr(wl, flux, 6000, 6200)]*len(wl))
ivar = 1/eflux**2
return wl, flux, ivar
def plot_spec(ax, x, y, tel, epoch):
""" plot the spectrum """
choose_x = np.logical_and(x >= 3200, x<= 9300)
choose = choose_x
ax.plot(
x[choose], y[choose], c='lightgrey',
drawstyle='steps-mid', lw=0.5, alpha=0.4)
return ax
def plot_smoothed_spec(ax, x, y, ivar, tel, epoch, ls='-', lw=0.5, c='black', label=None):
""" plot the smoothed spectrum """
res = get_res(tel)
choose_x = np.logical_and(x >= 3200, x<= 9300)
choose = choose_x
smoothed = smooth_spec(x, y, ivar, res*3)
ax.plot(
x[choose], smoothed[choose], c=c,
drawstyle='steps-mid', lw=lw, ls=ls, alpha=1.0, label=label)
dt_str = r"+%s\,d" %str(np.round(epoch, 1))
ax.text(
x[choose][-1]+100, smoothed[choose][-1], s=dt_str,
horizontalalignment='left', verticalalignment='center',
fontsize=14)
return ax
def choose_lines(z, dt):
""" choose galaxy emission lines given the epoch """
balmer = np.array([6564.61, 4862.68, 4341.68, 4102.89, 3970.072])
oiii = np.array([4363, 4932.6, 4960.295, 5008.24]) # O III
oii = np.array([3727.092, 3729.875])
nii = np.array([6549.86])
oi = np.array([6302.046, 6365.536])
gal_wl = np.hstack((balmer, oiii, oii)) * (z+1)
return gal_wl
def plot_lines(ax, z, tel, dt):
""" Plot galaxy emission lines for a particular redshift """
res = get_res(tel)
gal_wl = choose_lines(z, dt)
for val in gal_wl:
ax.axvspan(
val-res/2, val+res/2, ls='--', color='grey', lw=0.5, alpha=0.5)
def clip_lines(wl, flux, z, tel, dt):
res = get_res(tel)
gal_wl = choose_lines(z, dt)
for line in gal_wl:
choose = np.logical_and(wl >= line-res/2, wl <= line+res/2)
flux = np.interp(wl, wl[~choose], flux[~choose]) # interp over features
return wl, flux
def get_tellurics():
start = np.array([7594, 6853])
end = np.array([7678, 6950])
return start, end
def clip_tellurics(wl, flux):
start, end = get_tellurics()
for ii,beg in enumerate(start):
choose = np.logical_and(wl >= beg, wl <= end[ii])
flux = np.interp(wl, wl[~choose], flux[~choose])
return wl, flux
def plot_tellurics():
col = 'pink'
plt.axvspan(7594, 7678, ls='--', color=col, lw=0.5, alpha=0.5)
plt.axvspan(6853, 6950, ls='--', color=col, lw=0.5, alpha=0.5)
def fluxcal(wl, flux, dt_spec):
""" Flux-calibrate to R-band light curve """
# get r-band LC
dt, filt, mag, emag = get_lc()
det = np.logical_and(mag<99, ~np.isnan(mag))
nondet = np.logical_or(mag==99, np.isnan(mag))
choose = np.logical_and(det, filt=='r')
# interpolate to this epoch
rval = np.interp(dt_spec, dt[choose], mag[choose])
# TEMP: r is roughly 658nm +/- 138nm
# TEMP: assume AB mag
lam = 6580 # in angstroms
c = 3E18 # angstrom/s
fnu = 1E-23 * 3631 * 10**(rval/(-2.5)) # erg/s/cm2/Hz
flam = fnu * (c/lam**2) # should be erg/s/cm2/AA
# scale factor
flam_meas = np.interp(lam, wl, flux)
#scale = (flam/flam_meas)/1E-15
return wl, flux
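# Worked check of the conversion above (approximate): for r = 18 mag,
# fnu = 1E-23 * 3631 * 10**(-18/2.5) ~ 2.3E-27 erg/s/cm2/Hz, and
# flam = fnu * (3E18 / 6580**2) ~ 1.6E-16 erg/s/cm2/AA.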
if __name__=="__main__":
z = 0.03154
files, epochs, tels = get_files()
start = 0
end = 19
files = files[start:end]
epochs = epochs[start:end]
tels = tels[start:end]
nfiles = len(files)
fig,axarr = plt.subplots(
1, 2, figsize=(10,10), sharex=True)
for ii,f in enumerate(files):
if ii < nfiles/2:
ax = axarr[0]
else:
ax = axarr[1]
tel = tels[ii]
dt = epochs[ii]
wl, flux, ivar = load_spec(f, tel)
print(tel)
wl, flux = clip_lines(wl, flux, z, tel, dt)
wl, flux = clip_tellurics(wl, flux)
wl, flux = fluxcal(wl, flux, dt)
if ii < nfiles/2:
scale = (flux[wl > 3800][0])/2
plot_spec(ax, wl, flux/scale+nfiles/2-ii%(nfiles/2), tel, dt)
plot_smoothed_spec(
ax, wl, flux/scale+nfiles/2-ii%(nfiles/2), ivar, tel, dt)
else:
scale = (flux[wl > 4600][0])/2
plot_spec(ax, wl, flux/scale+nfiles/2-ii%(nfiles/2), tel, dt)
plot_smoothed_spec(
ax, wl, flux/scale+nfiles/2-ii%(nfiles/2), ivar, tel, dt)
ax.tick_params(axis='both', labelsize=14)
axarr[0].set_ylabel(
r"Scaled $F_{\lambda}$ + constant",
fontsize=16)
axarr[0].set_xlabel(r"Observed Wavelength (\AA)", fontsize=16)
axarr[1].set_xlabel(r"Observed Wavelength (\AA)", fontsize=16)
axarr[1].get_yaxis().set_ticks([])
plt.xlim(3000, 11000)
#plt.xlim(4900, 5200)
plt.subplots_adjust(wspace=0)
axarr[0].set_ylim(0,12)
axarr[1].set_ylim(1,12)
#plt.tight_layout()
plt.savefig("spec_sequence.eps", dpi=300, bbox_inches='tight')
#plt.show()
#plt.close()
|
the-stack_0_23950 |
from loa.team import Team
from .myunit import YUnit, YUnit2, YUnit3, YUnit4, YUnit5, YUnit6, YUnit7, YUnit8, YUnit9, YUnit10
def get_team():
return YTeam("세깅이")
class YTeam(Team):
def initialize(self):
self.units.append(YUnit(self,"A-Unit01",0))
self.units.append(YUnit2(self,"A-Unit02",1))
self.units.append(YUnit3(self,"A-Unit03",2))
self.units.append(YUnit4(self,"A-Unit04",3))
self.units.append(YUnit5(self,"A-Unit05",4))
self.units.append(YUnit6(self,"A-Unit06",5))
self.units.append(YUnit7(self,"A-Unit07",6))
self.units.append(YUnit8(self,"A-Unit08",7))
self.units.append(YUnit9(self,"A-Unit09",8))
self.units.append(YUnit10(self,"A-Unit10",9))
def arrange(self, enemy: Team):
UnitCopy=[]
collist=[0 for col in range(10)]
result=[0 for col in range(10)]
selected=[0,0,0,0,0,0,0,0,0,0]
HpConduct=[[[0, 0,0] for col in range(10)]for row in range(10)]
for i in range(10):
            for j in range(10): #compute the value of my j-th unit (column) against the enemy unit at position i (row)
if self.units[j] is not None and enemy.units[i] is not None:
HpConduct[i][j][2]=j
HpConduct[i][j][1]=max(0, (enemy.units[i].hp - max(1, self.units[j].att - enemy.units[i].arm)))
if (self.units[j].hp - max(1, 0.5*enemy.units[i].att - self.units[j].arm))>0:
HpConduct[i][j][0]=self.units[j].hp - max(1, 0.5*enemy.units[i].att - self.units[j].arm)
else:
HpConduct[i][j][0]=0
# enemy1-self1 enemy1-self2 enemy1-self3...
# enemy2-self1 enemy2-self2 enemy2-self3....
# 0>myhp, 1>enemyhp
for i in range(10):
collist[i]=HpConduct[i][:]
collist[i].sort(key=lambda x:(x[1], -x[0]))
        #results[i][j]->i:myhp , j:enemyhp (optimal position)
for i in range(10):
results=[[],[],[],[],[],[],[],[],[],[]]
for k in range(10):
results[collist[k][i][2]].append(k)
for j in range(10):
if result[j]==0 :
if len(results[j])==1 and selected[results[j][0]]==0:
result[j]=results[j][0]
selected[results[j][0]]=1
if len(results[j])>1:
get=[]
for k in range(len(results[j])):
if selected[results[j][k]]==0:
get.append(HpConduct[j][results[j][k]])
if len(get)>1:
get.sort(key=lambda x:(x[1], -x[0]))
result[j]=get[0][2]
selected[get[0][2]]=1
for i in range(10):
if result[i]==0:
results=[[0,0,0] for col in range(10)]
for k in range(10):
#HpConduct[j][i]
HpConduct[k][i][2]=k
results[k][0]=HpConduct[k][i][0]
results[k][1]=HpConduct[k][i][1]
results[k][2]=HpConduct[k][i][2]
results.sort(key=lambda x:(x[1],-x[0]))
get=[]
for k in range(10):
if (results[k][2] not in result) or (selected[0]==0 and results[k][2]==0):
get.append(results[k])
if len(get)>=1:
get.sort(key=lambda x:(x[1],-x[0]))
result[i]=get[0][2]
selected[get[0][2]]=1
for l in range(10):
for m in range(10):
if self.units[m] is not None :
if self.units[m].pos==result[l]:
UnitCopy=self.units[l]
self.units[l]=self.units[m]
self.units[m]=UnitCopy
for n in range(10):
if self.units[n] is not None:
self.units[n].pos=n
|
the-stack_0_23951 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="covid19_dashboard",
version="0.0.1",
author="Max Bennett",
author_email="[email protected]",
description="A personalized dashboard which maps up to date covid data to a web template",
long_description="Using a webpage template this package creates a dashboard displaying up to date covid data from "
"an api, it also contains news articles obtained from a news api and you can remove articles and "
"schedule updates for yourself",
long_description_content_type="text/markdown",
url="",
classifiers=[
"Programming Language :: Python :: 3",
"License :: Freeware",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
)
|
the-stack_0_23952 | """
functions.py
A brief introduction on python project best practices
Handles the primary functions
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def calculate_distance(rA, rB):
# This function calculates the distance between two points given as numpy arrays.
d=(rA-rB)
dist=np.linalg.norm(d)
return dist
def open_pdb(f_loc):
# This function reads in a pdb file and returns the atom names and coordinates.
with open(f_loc) as f:
data = f.readlines()
c = []
sym = []
for l in data:
if 'ATOM' in l[0:6] or 'HETATM' in l[0:6]:
sym.append(l[76:79].strip())
c2 = [float(x) for x in l[30:55].split()]
c.append(c2)
coords = np.array(c)
return sym, coords
atomic_weights = {
'H': 1.00784,
'C': 12.0107,
'N': 14.0067,
'O': 15.999,
'P': 30.973762,
'F': 18.998403,
'Cl': 35.453,
'Br': 79.904,
}
def open_xyz(file_location):
# Open an xyz file and return symbols and coordinates.
xyz_file = np.genfromtxt(fname=file_location, skip_header=2, dtype='unicode')
symbols = xyz_file[:,0]
coords = (xyz_file[:,1:])
coords = coords.astype(np.float)
return symbols, coords
def write_xyz(file_location, symbols, coordinates):
# Write an xyz file given a file location, symbols, and coordinates.
num_atoms = len(symbols)
with open(file_location, 'w+') as f:
f.write('{}\n'.format(num_atoms))
f.write('XYZ file\n')
for i in range(num_atoms):
f.write('{}\t{}\t{}\t{}\n'.format(symbols[i],
coordinates[i,0], coordinates[i,1], coordinates[i,2]))
def draw_molecule(coordinates, symbols, draw_bonds=None, save_location=None, dpi=300):
# Draw a picture of a molecule using matplotlib.
# Create figure
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Get colors - based on atom name
colors = []
for atom in symbols:
colors.append(atom_colors[atom])
size = np.array(plt.rcParams['lines.markersize'] ** 2)*200/(len(coordinates))
ax.scatter(coordinates[:,0], coordinates[:,1], coordinates[:,2], marker="o",
edgecolors='k', facecolors=colors, alpha=1, s=size)
# Draw bonds
if draw_bonds:
for atoms, bond_length in draw_bonds.items():
atom1 = atoms[0]
atom2 = atoms[1]
ax.plot(coordinates[[atom1,atom2], 0], coordinates[[atom1,atom2], 1],
coordinates[[atom1,atom2], 2], color='k')
# Save figure
if save_location:
        plt.savefig(save_location, dpi=dpi)
return ax
def calculate_angle(rA, rB, rC, degrees=False):
# Calculate the angle between three points. Answer is given in radians by default, but can be given in degrees
# by setting degrees=True
AB = rB - rA
BC = rB - rC
theta=np.arccos(np.dot(AB, BC)/(np.linalg.norm(AB)*np.linalg.norm(BC)))
if degrees:
return np.degrees(theta)
else:
return theta
def bond_histogram(bond_list, save_location=None, dpi=300, graph_min=0, graph_max=2):
# Draw a histogram of bond lengths based on a bond_list (output from build_bond_list function)
lengths = []
for atoms, bond_length in bond_list.items():
lengths.append(bond_length)
bins = np.linspace(graph_min, graph_max)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.xlabel('Bond Length (angstrom)')
plt.ylabel('Number of Bonds')
ax.hist(lengths, bins=bins)
# Save figure
if save_location:
plt.savefig(save_location, dpi=dpi)
return ax
def build_bond_list(coordinates, max_bond=1.5, min_bond=0):
# Find the bonds in a molecule (set of coordinates) based on distance criteria.
bonds = {}
num_atoms = len(coordinates)
for atom1 in range(num_atoms):
for atom2 in range(atom1, num_atoms):
distance = calculate_distance(coordinates[atom1], coordinates[atom2])
if distance > min_bond and distance < max_bond:
bonds[(atom1, atom2)] = distance
return bonds
atom_colors = {
'H': 'white',
'C': '#D3D3D3',
'N': '#add8e6',
'O': 'red',
'P': '#FFA500',
'F': '#FFFFE0',
'Cl': '#98FB98',
'Br': '#F4A460',
'S': 'yellow'
}
def canvas(with_attribution=True):
"""
Placeholder function to show example docstring (NumPy format)
Replace this function and doc string for your own project
Parameters
----------
with_attribution : bool, Optional, default: True
Set whether or not to display who the quote is from
Returns
-------
quote : str
Compiled string including quote and optional attribution
"""
quote = "The code is but a canvas to our imagination."
if with_attribution:
quote += "\n\t- Adapted from Henry David Thoreau"
return quote
if __name__ == "__main__":
# Do something if this file is invoked on its own
print(canvas())
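    # Example pipeline (sketch; the pdb file name is hypothetical):
    #   symbols, coords = open_pdb("water.pdb")
    #   bonds = build_bond_list(coords, max_bond=1.5)
    #   draw_molecule(coords, symbols, draw_bonds=bonds, save_location="water.png")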
|
the-stack_0_23953 | # 12/25/20
from Helpers.FileHelper import readFile
from typing import List
FILEPATH: str = "2020/Input/day25.txt"
def calculateLoopSize(key: int) -> int:
    """
    Calculates the loop size that transforms subject number 7 into key
    """
val: int = 1
subjectNumber: int = 7
numLoops: int = 0
while val != key:
val *= subjectNumber
val %= 20201227
numLoops += 1
return numLoops
def calculateEncryptionKey(subjectNumber: int, numLoops: int) -> int:
"""
Calculates encryption key
"""
val: int = 1
for _ in range(numLoops):
val *= subjectNumber
val %= 20201227
return val
def main():
inputLines: List[str] = [s.strip() for s in readFile(FILEPATH)]
cardPublicKey, doorPublicKey = int(inputLines[0]), int(inputLines[1])
# Part 1
# This operation takes ~2 minutes, so I ran the code and hand put the values in below
# cardLoops: int = calculateLoopSize(cardPublicKey)
# doorLoops: int = calculateLoopSize(doorPublicKey)
cardLoops: int = 8987376
doorLoops: int = 14382089
print(f"Part 1 -- Encryption Key: {calculateEncryptionKey(cardPublicKey, doorLoops)}")
# No part 2 xD
if __name__ == "__main__":
main()
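# Worked example from the puzzle statement: public keys 5764801 (card) and
# 17807724 (door) have loop sizes 8 and 11, and both
# calculateEncryptionKey(17807724, 8) and calculateEncryptionKey(5764801, 11)
# yield the shared encryption key 14897079.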
"""
--- Day 25: Combo Breaker ---
--- Part One ---
The card always uses a specific, secret loop size when it transforms a subject number. The door always
uses a different, secret loop size.
The cryptographic handshake works like this:
The card transforms the subject number of 7 according to the card's secret loop size. The result is called the card's public key.
The door transforms the subject number of 7 according to the door's secret loop size. The result is called the door's public key.
The card and door use the wireless RFID signal to transmit the two public keys (your puzzle input) to the other device. Now, the card has the door's public key, and the door has the card's public key. Because you can eavesdrop on the signal, you have both public keys, but neither device's loop size.
The card transforms the subject number of the door's public key according to the card's loop size. The result is the encryption key.
The door transforms the subject number of the card's public key according to the door's loop size. The result is the same encryption key as the card calculated.
If you can use the two public keys to determine each device's loop size, you will have enough information
to calculate the secret encryption key that the card and door use to communicate; this would let you
send the unlock command directly to the door!
What encryption key is the handshake trying to establish?
--- Part Two ---
The light turns green and the door unlocks. As you collapse onto the bed in your room, your pager goes off!
"It's an emergency!" the Elf calling you explains. "The soft serve machine in the cafeteria on sub-basement
7 just failed and you're the only one that knows how to fix it! We've already dispatched a reindeer to
your location to pick you up."
You hear the sound of hooves landing on your balcony.
The reindeer carefully explores the contents of your room while you figure out how you're going to pay
the 50 stars you owe the resort before you leave. Noticing that you look concerned, the reindeer wanders
over to you; you see that it's carrying a small pouch.
"Sorry for the trouble," a note in the pouch reads. Sitting at the bottom of the pouch is a gold coin
with a little picture of a starfish on it.
Looks like you only needed 49 stars after all. =)
""" |
the-stack_0_23954 | from pyparsing import *
def _constructGrammar():
ParserElement.setDefaultWhitespaceChars(' \t')
newline = Optional(Suppress('\n'))
openbrace = Suppress("{")
closebrace = Suppress("}")
openbracket = Suppress("(")
closebracket = Suppress(")")
quote = Suppress("\"")
colon = Suppress(":")
def justStringNotList(tokens):
return tokens[0]
def paramGroup(tokens):
params = {}
i = 0
for token in tokens:
if len(token) > 1:
params[token[0]] = token[1]
else:
params[i] = token[0]
i += 1
return params
# Grammar
name = Optional(openbracket) + quote + Word(alphanums+' ').setResultsName('name').setParseAction(justStringNotList) + quote + Optional(closebracket)
parameter = Group(Optional(Word(alphas)) + Optional(colon) + quote + Word(alphanums+' '+'-') + quote)
optionalParamGroup = Optional(openbracket) + OneOrMore(parameter).setParseAction(paramGroup).setResultsName('options') + Optional(closebracket) + newline
step = Word(alphas) + optionalParamGroup
steps = Group(Or([Keyword('with'), Word(alphas)]).setResultsName('type') + optionalParamGroup+ Optional(Group(openbrace + newline + OneOrMore(step) + closebrace)) + newline)
stage = Group(Keyword("stage") + name + openbrace + newline + OneOrMore(steps).setResultsName('steps') + newline + closebrace + newline)
options = Group(Word(alphas) + colon + Optional(quote) + Word(alphanums+' ') + Optional(quote)) + newline
pipeline = Keyword("pipeline") + name + openbrace + newline + ZeroOrMore(options).setResultsName('options').setParseAction(paramGroup) + OneOrMore(stage).setResultsName('stages') + closebrace
return pipeline
DSL = _constructGrammar()
def parse(pipeline):
return DSL.parseString(pipeline)
class BaseClass(str):
def _inheritParameter(self, parameter, tokens):
if parameter in tokens:
self.__dict__[parameter] = tokens.__getattr__(parameter)
def __init__(self, tokens):
self._inheritParameter('name', tokens)
self._inheritParameter('options', tokens)
|
the-stack_0_23956 | # Copyright (c) 2021 Johnathan P. Irvin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from typing import List
from viking.connections.events.connections import SocketClosedEvent, SocketOpenedEvent
from viking.events import EventBus
from .interfaces import AbstractConnectionManager, AbstractSocket
class ConnectionManager(AbstractConnectionManager):
def __init__(self, event_bus: EventBus) -> None:
"""
Initialize the connection manager.
"""
self._connections: List[AbstractSocket] = []
self._event_bus: EventBus = event_bus
async def connect(self, socket: AbstractSocket) -> None:
"""
Connect a socket to the manager.
Args:
socket (AbstractSocket): The socket to connect.
"""
await socket.accept()
self._connections.append(socket)
self._event_bus.publish(SocketOpenedEvent(socket))
async def disconnect(self, socket: AbstractSocket) -> None:
"""
Disconnect a socket from the manager.
Args:
socket (AbstractSocket): The socket to disconnect.
"""
self._connections.remove(socket)
self._event_bus.publish(SocketClosedEvent(socket))
async def get_connections(self) -> List[AbstractSocket]:
"""
Return a list of all socket connected to the manager.
Returns:
List[AbstractSocket]: A list of all socket connected to the manager.
"""
return self._connections
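# Usage sketch (assumes an ASGI-style WebSocket object satisfying
# AbstractSocket, e.g. inside a FastAPI endpoint; names are illustrative):
#   manager = ConnectionManager(event_bus)
#   await manager.connect(websocket)     # accepts and publishes SocketOpenedEvent
#   await manager.disconnect(websocket)  # removes and publishes SocketClosedEvent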
|
the-stack_0_23962 | import pandas as pd
import pytest
from m2p import PolyMaker
from rdkit import Chem
pm = PolyMaker()
@pytest.fixture
def vinyl():
return pm.thermoplastic("C=C", DP=10, mechanism="vinyl").polymer[0]
@pytest.fixture
def RS_ester_reactants():
poly_name = ["poly(RS-3-hydroxybutyrate)"]
smiles = ["C[C@@H](O)CC(=O)O.C[C@H](O)CC(=O)O"]
reactants = pd.DataFrame({"smiles": smiles}, index=poly_name)
reactants["monomers"] = reactants.smiles.apply(
lambda s: pm.get_monomers(s, stereochemistry=True)
)
return reactants
def calc_pm(smi):
# Dict to count
RS_list = []
mol = Chem.MolFromSmiles(smi)
for atom in mol.GetAtoms():
try:
chiral_center = atom.GetProp("_CIPCode")
RS_list.append(chiral_center)
except:
pass
# Zip together list to get neighbors and determine R vs M addition
is_M_add = list(map(lambda t: t[0] == t[1], zip(*(RS_list, RS_list[1:]))))
pm = sum(is_M_add) / len(is_M_add)
return pm
def test_vinyl(vinyl):
smile_vinyl = vinyl
assert smile_vinyl == "CCCCCCCCCCCCCCCCCCCC"
def test_ester_stereo_iso(RS_ester_reactants):
poly_df = pm.thermoplastic(
RS_ester_reactants, DP=10, mechanism="ester_stereo", pm=1, verbose=False
)
pmeso = calc_pm(poly_df["polymer"][0])
assert len(poly_df) == 1
assert pmeso == 1
def test_ester_stereo_syn(RS_ester_reactants):
poly_df = pm.thermoplastic(
RS_ester_reactants, DP=10, mechanism="ester_stereo", pm=0, verbose=False
)
pmeso = calc_pm(poly_df["polymer"][0])
pmeso
assert len(poly_df) == 1
assert pmeso == 0
def test_ester_stereo_a(RS_ester_reactants):
poly_df = pm.thermoplastic(
RS_ester_reactants, DP=10, mechanism="ester_stereo", pm=0.5, verbose=False
)
pmeso = calc_pm(poly_df["polymer"][0])
pmeso
assert len(poly_df) == 1
assert 0 < pmeso and pmeso < 1
def test_df_pm_ester_stereo(RS_ester_reactants):
RS_ester_reactants["pm"] = 1
poly_df = pm.thermoplastic(
RS_ester_reactants, DP=10, mechanism="ester_stereo", verbose=False
)
pmeso = calc_pm(poly_df["polymer"][0])
pmeso
assert len(poly_df) == 1
assert pmeso == 1
|
the-stack_0_23963 | import sys,math,io,os,time,itertools,collections
mod=10**9+7
sys.setrecursionlimit(10000)
i=sys.stdin.readline
p=sys.stdout.write
#use sys.stdout.write() (remember to convert to str b4 and concatenate "\n")
global start,end
#binary search: index of x if present, else index of the largest element < x (-1 if x < l[0])
def isin(l,x):
left=0
right=len(l)-1
if x<l[0]:
return -1
while left<=right:
mid=left + (right -left)//2
if l[mid]==x:
return mid
elif l[mid]<x:
ans=mid
left=mid+1
else:
right=mid-1
return ans
#is palindrome or not
def ispal(l):
n=len(l)
for i in range(n//2+1):
if l[i]!=l[n-i-1]:
return False
return True
#coordinate compression
def ccarray(l):
d={l[k]:k for k in range(len(l))}
m=sorted(d)
return [d[m[k]] for k in range(len(l))]
#checks if prime or not
def is_prime(n):
if n<=3:
return n>1
if n%2==0 or n%3==0:
return False
k=5
while k**2<=n:
if n%k==0 or n%(k+2)==0:
return False
k+=6
return True
#sieve of eratosthenes
def sieve(n):
    prime=[True for k in range(n+1)]
    prime[0]=prime[1]=False
    p=2
    while p*p<=n:
        if prime[p]==True:
            for k in range(p*p,n+1,p):
                prime[k]=False
        p+=1
    return prime
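#example: [i for i,f in enumerate(sieve(30)) if f] -> primes up to 30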
def main():
a,b=[int(k) for k in i().strip().split()]
if math.gcd(a,b)==1:
p("Finite\n")
else:
p("Infinite\n")
t=1
t=int(i())
start=time.perf_counter()
for _ in range(t):
main()
end=time.perf_counter()
#print(end-start)
|
the-stack_0_23964 | """Tests for collections_overlay.py."""
from pytype.overlays import collections_overlay
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
import unittest
class NamedTupleAstTest(unittest.TestCase):
"""Test collection_overlay's namedtuple AST generation."""
PYTHON_VERSION = (2, 7)
def _namedtuple_ast(self, name, fields):
return collections_overlay.namedtuple_ast(name, fields, self.PYTHON_VERSION)
def test_basic(self):
ast = self._namedtuple_ast("X", [])
typeparam, = ast.type_params
self.assertEqual("X", typeparam.bound.name)
nt = ast.Lookup("X")
self.assertEqual("def __init__(self, *args, **kwargs) -> None: ...",
pytd_utils.Print(nt.Lookup("__init__")))
make_sig, = nt.Lookup("_make").signatures
replace_sig, = nt.Lookup("_replace").signatures
self.assertEqual("_TX", make_sig.return_type.name)
self.assertEqual("_TX", replace_sig.return_type.name)
def test_no_fields(self):
nt = self._namedtuple_ast("X", []).Lookup("X")
self.assertEqual("Tuple[nothing, ...]",
pytd_utils.Print(nt.Lookup("_fields").type))
getnewargs_sig, = nt.Lookup("__getnewargs__").signatures
self.assertEqual("Tuple[nothing, ...]",
pytd_utils.Print(getnewargs_sig.return_type))
self.assertEqual("def __new__(cls: Type[_TX]) -> _TX: ...",
pytd_utils.Print(nt.Lookup("__new__")))
def test_fields(self):
nt = self._namedtuple_ast("X", ["y", "z"]).Lookup("X")
self.assertEqual("Tuple[str, str]",
pytd_utils.Print(nt.Lookup("_fields").type))
self.assertEqual(pytd.AnythingType(), nt.Lookup("y").type)
self.assertEqual(pytd.AnythingType(), nt.Lookup("z").type)
getnewargs_sig, = nt.Lookup("__getnewargs__").signatures
self.assertEqual("Tuple[Any, Any]",
pytd_utils.Print(getnewargs_sig.return_type))
self.assertEqual("def __new__(cls: Type[_TX], y, z) -> _TX: ...",
pytd_utils.Print(nt.Lookup("__new__")))
def test_name(self):
# The generated name has to be different from the official name, or we'll
# end up with nonsense like X = X.
self.assertNotEqual("X", collections_overlay.namedtuple_name("X", []))
# Two namedtuple instances should have the same name iff the instances are
# the same.
self.assertNotEqual(collections_overlay.namedtuple_name("X", []),
collections_overlay.namedtuple_name("X", ["a"]))
self.assertNotEqual(collections_overlay.namedtuple_name("X", ["a"]),
collections_overlay.namedtuple_name("X", ["b"]))
if __name__ == "__main__":
unittest.main()
|
the-stack_0_23965 | # ©2020 Johns Hopkins University Applied Physics Laboratory LLC.
# Touchdown is a multiagent game in which both teams are trying to reach
# the opposing end of the play area
#if entities from opposite teams touch, they both respawn to their respective starting lines
#teams may independently have discrete or continuous action spaces, and vector or image observation spaces
import math, random, time
import numpy as np
import cv2
from arena5.core.utils import mpi_print
from gym.spaces import Box, Discrete
class TouchdownEnv():
def __init__(self, team_size, blue_obs="image", blue_actions="discrete", red_obs="image", red_actions="discrete",
clr1=(0,0,255.0), clr2=(255.0,0,0)):
self.team_size = team_size
self.blue_obs = blue_obs
self.blue_actions = blue_actions
self.red_obs = red_obs
self.red_actions = red_actions
self.start_pos = 0.9
self.capture_radius = 0.2
self.player_movement = 0.1
self.clr1 = clr1
self.clr2 = clr2
#track player movement in a normalized space
self.blue_team = []
for t in range(self.team_size):
self.blue_team.append([0.0, self.start_pos])
self.red_team = []
for t in range(self.team_size):
self.red_team.append([0.0, -self.start_pos])
self.all_players = self.blue_team + self.red_team
#obs spaces
self.observation_spaces = []
for bp in self.blue_team:
if self.blue_obs == "image":
self.observation_spaces.append(Box(-10, 10, (84,84,1)))
elif self.blue_obs =="vector":
self.observation_spaces.append(Box(-10, 10, (len(self.all_players)*2,)))
else:
raise ValueError
for rp in self.red_team:
if self.red_obs == "image":
self.observation_spaces.append(Box(-10, 10, (84,84,1)))
elif self.red_obs =="vector":
self.observation_spaces.append(Box(-10, 10, (len(self.all_players)*2,)))
else:
raise ValueError
#action spaces
self.action_spaces = []
for bp in self.blue_team:
if self.blue_actions == "discrete":
self.action_spaces.append(Discrete(5))
elif self.blue_actions == "continuous":
self.action_spaces.append(Box(-1.0, 1.0, (2,)))
else:
raise ValueError
for rp in self.red_team:
if self.red_actions == "discrete":
self.action_spaces.append(Discrete(5))
elif self.red_actions == "continuous":
self.action_spaces.append(Box(-1.0, 1.0, (2,)))
else:
raise ValueError
def dist(self, p1, p2):
dx = p1[0]-p2[0]
dy = p1[1]-p2[1]
return math.sqrt(dx*dx + dy*dy)
def reset(self):
self.blue_team = []
for t in range(self.team_size):
self.blue_team.append([0.0, self.start_pos])
self.red_team = []
for t in range(self.team_size):
self.red_team.append([0.0, -self.start_pos])
self.all_players = self.blue_team + self.red_team
self.epstep = 0
#get states and return
states = []
for i in range(len(self.all_players)):
states.append(self.get_state_for_player(i))
return states
def step(self, actions):
self.epstep += 1
for i in range(len(actions)):
self.step_player(actions[i], i)
#check for collisions (between teams)
for bp in self.blue_team:
for rp in self.red_team:
d = self.dist(bp, rp)
if d < self.capture_radius:
bp[0] = 0.0
bp[1] = self.start_pos
rp[0] = 0.0
rp[1] = -self.start_pos
#check for end-of-game
done = False
blue_reward = 0.0
for bp in self.blue_team:
if bp[1] < -1.0:
done = True
blue_reward += 1.0
for rp in self.red_team:
if rp[1] > 1.0:
done = True
blue_reward -= 1.0
done = done or self.epstep>=1500
rewards = [blue_reward for p in self.blue_team] + [-blue_reward for p in self.red_team]
infos = [{} for p in self.all_players]
#get states and return
states = []
for i in range(len(self.all_players)):
states.append(self.get_state_for_player(i))
return states, rewards, done, infos
def step_player(self, action, idx):
if idx < self.team_size:
#blue player
if self.blue_actions == "discrete":
action = action[0]
if action == 1:
self.all_players[idx][0] += self.player_movement
elif action == 2:
self.all_players[idx][0] -= self.player_movement
elif action == 3:
self.all_players[idx][1] += self.player_movement
elif action == 4:
self.all_players[idx][1] -= self.player_movement
elif self.blue_actions == "continuous":
self.all_players[idx][0] += action[0]*self.player_movement
self.all_players[idx][1] += action[1]*self.player_movement
#blue player cannot exceed y=1.0
if self.all_players[idx][1] > 1.0:
self.all_players[idx][1] = 1.0
else:
#red player - actions are all reversed
if self.red_actions == "discrete":
action = action[0]
if action == 1:
self.all_players[idx][0] += (-self.player_movement)
elif action == 2:
self.all_players[idx][0] -= (-self.player_movement)
elif action == 3:
self.all_players[idx][1] += (-self.player_movement)
elif action == 4:
self.all_players[idx][1] -= (-self.player_movement)
elif self.red_actions == "continuous":
self.all_players[idx][0] += action[0]*(-self.player_movement)
self.all_players[idx][1] += action[1]*(-self.player_movement)
            # red player cannot go below y=-1.0
if self.all_players[idx][1] < -1.0:
self.all_players[idx][1] = -1.0
#check for left-right walls
if self.all_players[idx][0] > 1.0:
self.all_players[idx][0] = 1.0
if self.all_players[idx][0] < -1.0:
self.all_players[idx][0] = -1.0
self.blue_team = self.all_players[:self.team_size]
self.red_team = self.all_players[self.team_size:]
def render(self):
        img = np.ones((300, 300, 3), np.uint8) * 255  # keep uint8 so cv2.imshow renders colors correctly
#draw all blue
for bp in self.blue_team:
color = self.clr1
pix_x = int((bp[0]+1.0)*0.5*300.0)
pix_y = int((bp[1]+1.0)*0.5*300.0)
cv2.circle(img, (pix_x,pix_y), int(150*self.capture_radius), color, -1)
#draw all red
for rp in self.red_team:
color = self.clr2
pix_x = int((rp[0]+1.0)*0.5*300.0)
pix_y = int((rp[1]+1.0)*0.5*300.0)
cv2.circle(img, (pix_x,pix_y), int(150*self.capture_radius), color, -1)
#we actually are drawing in BGR, so flip last axis
img = np.flip(img, axis=-1)
cv2.namedWindow("img", cv2.WINDOW_NORMAL)
cv2.imshow("img", img)
cv2.waitKey(10)
def get_state_for_player(self, entity_idx):
if entity_idx < self.team_size:
if self.blue_obs == "image":
return self.get_image_state_for_player(entity_idx)
elif self.blue_obs == "vector":
return self.get_vector_state_for_player(entity_idx)
else:
raise ValueError
else:
if self.red_obs == "image":
return self.get_image_state_for_player(entity_idx)
elif self.red_obs == "vector":
return self.get_vector_state_for_player(entity_idx)
else:
raise ValueError
def get_vector_state_for_player(self, entity_idx):
self_state = []
ally_states = []
enemy_states = []
for idx in range(len(self.all_players)):
if idx == entity_idx:
self_state += self.all_players[idx]
else:
if entity_idx < self.team_size:
if idx < self.team_size:
ally_states += self.all_players[idx]
else:
enemy_states += self.all_players[idx]
else:
if idx < self.team_size:
enemy_states += self.all_players[idx]
else:
ally_states += self.all_players[idx]
state = self_state + ally_states + enemy_states
return np.asarray(state)
def get_image_state_for_player(self, entity_idx):
img = np.zeros((84, 84, 1), np.int8)
#draw all blue
for bp in self.blue_team:
color = 1.0 if entity_idx<self.team_size else -1.0
pix_x = int((bp[0]+1.0)*0.5*84.0)
pix_y = int((bp[1]+1.0)*0.5*84.0)
cv2.circle(img, (pix_x,pix_y), 3, (color), -1)
#draw all red
for rp in self.red_team:
color = 1.0 if entity_idx>=self.team_size else -1.0
pix_x = int((rp[0]+1.0)*0.5*84.0)
pix_y = int((rp[1]+1.0)*0.5*84.0)
cv2.circle(img, (pix_x,pix_y), 3, (color), -1)
#now re-draw this entity
color = 2.0
p = self.all_players[entity_idx]
pix_x = int((p[0]+1.0)*0.5*84.0)
pix_y = int((p[1]+1.0)*0.5*84.0)
cv2.circle(img, (pix_x,pix_y), 3, (color), -1)
#flip
if entity_idx>=self.team_size:
img = np.flipud(img)
img = np.fliplr(img)
return img
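if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): run one
    # random-action 2-vs-2 episode with vector observations and discrete
    # actions. Rendering needs a display; comment it out when headless.
    env = TouchdownEnv(team_size=2, blue_obs="vector", blue_actions="discrete",
                       red_obs="vector", red_actions="discrete")
    states = env.reset()
    done = False
    while not done:
        # discrete actions are wrapped in a one-element list, matching step_player()
        actions = [[space.sample()] for space in env.action_spaces]
        states, rewards, done, infos = env.step(actions)
        env.render()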
|
the-stack_0_23966 | from twisted.trial import unittest
from twisted.internet import tcp, protocol
from nacl.signing import SigningKey
from nacl.exceptions import CryptoError
from .. import util, errors
class Utils(unittest.TestCase):
def test_split_into(self):
self.failUnlessEqual(util.split_into("ABBCCC", [1,2,3]),
["A","BB","CCC"])
self.failUnlessEqual(util.split_into("ABBCCC", [2,1], True),
["AB","B","CCC"])
self.failUnlessRaises(ValueError,
util.split_into, "ABBCCC", [2,1], False)
self.failUnlessRaises(ValueError,
util.split_into, "ABBCCC", [2,1]
)
def test_ascii(self):
b2a = util.to_ascii
a2b = util.from_ascii
for prefix in ("", "prefix-"):
for length in range(0, 100):
b1 = "a"*length
for base in ("base64", "base32", "base16", "hex"):
a = b2a(b1, prefix, base)
b2 = a2b(a, prefix, base)
self.failUnlessEqual(b1, b2)
self.failUnlessRaises(NotImplementedError, b2a, "a", encoding="none")
self.failUnlessRaises(NotImplementedError, a2b, "a", encoding="none")
def test_nonce(self):
n1 = util.make_nonce()
self.failUnlessEqual(len(n1), 52)
n2 = util.make_nonce()
self.failIfEqual(n1, n2) # not exhaustive
def test_equal(self):
self.failUnless(util.equal("a", "a"))
self.failIf(util.equal("a", "b"))
def test_x_or_none(self):
self.failUnlessEqual(util.hex_or_none(None), None)
self.failUnlessEqual(util.hex_or_none("A"), "41")
self.failUnlessEqual(util.unhex_or_none(None), None)
self.failUnlessEqual(util.unhex_or_none("42"), "B")
def test_remove_prefix(self):
self.failUnlessEqual(util.remove_prefix("v1:stuff", "v1:"), "stuff")
x = self.failUnlessRaises(util.BadPrefixError,
util.remove_prefix, "v2:stuff", "v1:")
self.failUnlessEqual(str(x), "did not see expected 'v1:' prefix")
x = self.failUnlessRaises(ValueError,
util.remove_prefix, "v2:stuff", "v1:",
ValueError)
self.failUnlessEqual(str(x), "did not see expected 'v1:' prefix")
class Signatures(unittest.TestCase):
def test_verify_with_prefix(self):
sk = SigningKey.generate()
vk = sk.verify_key
m = "body"
prefix = "prefix:"
sk2 = SigningKey.generate()
sm1 = sk.sign(prefix+m)
sm2 = sk.sign("not the prefix"+m)
sm3 = sk2.sign(prefix+m)
self.failUnlessEqual(util.verify_with_prefix(vk, sm1, prefix), m)
self.failUnlessRaises(errors.BadSignatureError,
util.verify_with_prefix, vk, sm2, prefix)
self.failUnlessRaises(CryptoError,
util.verify_with_prefix, vk, sm3, prefix)
class AllocatePort(unittest.TestCase):
def test_allocate(self):
port = util.allocate_port()
# and it should be possible to claim this port right away
p2 = tcp.Port(port, protocol.Factory())
p2.startListening()
port2 = p2.getHost().port
d = p2.stopListening()
def _stopped(res):
self.failUnlessEqual(port, port2)
return res
d.addBoth(_stopped)
return d
|
the-stack_0_23969 | from configparser import ConfigParser
from __init__ import Armando
from constants import Constants
import json
import os
import threading
class ConfigError(Exception):
pass
class Config(object):
"""
Configuration parser for Armando main.conf format
@author: Fabio "BlackLight" Manganiello <[email protected]>
"""
__config = None
__config_lock = threading.RLock()
######
# Private methods
######
def __parse_rc_file(self, rcfile):
parser = ConfigParser()
with open(rcfile) as fp:
parser.read_file(fp)
for section in parser.sections():
# Ignore sections having enabled = False
if parser.has_option(section, 'enabled') and parser.getboolean(section, 'enabled') is False:
continue
# Case insensitive mapping - [logger]\nlevel=INFO in config becomes
# self.config['logger.level'] = 'INFO'
for key, value in parser.items(section):
key = ('%s.%s' % (section, key)).lower()
value = Constants.expand_value(value)
self.config[key] = value
def __init__(self, rcfile=None):
"""
Configuration constructor taking as argument:
rcfile -- Path string to the configuration file (default: __BASEDIR__/main.conf,
which can be locally overridden by __PWD__/main.conf)
"""
self.config = {}
rcfile_found = False
# If no rcfile is provided, we read __BASEDIR__/main.conf,
        # which can be overridden by your local share/YourProject/main.conf
if rcfile is None:
try:
self.__parse_rc_file(Armando.get_base_dir() + os.sep + 'main.conf')
rcfile_found = True
self.__parse_rc_file(os.getcwd() + os.sep + 'main.conf')
except FileNotFoundError as e:
if rcfile_found is False:
raise e
else:
self.__parse_rc_file(rcfile)
if len(self.config.items()) == 0:
raise RuntimeError( \
                'No configuration has been loaded - both %s/main.conf and ./main.conf files ' \
                'were not found or are invalid' % (Armando.get_base_dir()))
######
# Public methods
######
@classmethod
def get_config(cls, rcfile=None):
"""
Thread-safe singleton to access or initialize the static default configuration object
"""
cls.__config_lock.acquire()
try:
if cls.__config is None:
cls.__config = Config(rcfile)
finally:
cls.__config_lock.release()
return cls.__config
def get(self, attr):
"""
Configuration getter
attr -- Attribute name - note that we are case insensitive when it comes to attribute names
"""
attr = attr.lower()
return self.config[attr] if attr in self.config else None
def dump(self):
" Dump the configuration object in JSON format "
return json.dumps(self.config)
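if __name__ == '__main__':
    # Usage sketch (illustrative): read the default main.conf through the
    # thread-safe singleton, assuming one exists in the base directory.
    # 'logger.level' is a hypothetical example key.
    config = Config.get_config()
    print(config.get('logger.level'))
    print(config.dump())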
# vim:sw=4:ts=4:et:
|
the-stack_0_23970 | from dataclasses import dataclass
from re import match
from datetime import datetime
from durand import Node, Variable, DatatypeEnum
def datetime_to_time(d: datetime):
return d.strftime('%I:%M') + ('AM' if d.hour < 12 else 'PM')
def datetime_to_date(d: datetime):
return d.strftime('%m-%d-%Y')
@dataclass
class FileInfo:
FileName: str = 'python_durand_device.eds'
FileVersion: int = 0
FileRevision: int = 0
EDSVersion: str = '4.0'
Description: str = None
CreationTime: str = None
CreationDate: str = None
CreatedBy: str = None
ModificationTime: str = None
ModificationDate: str = None
ModifiedBy: str = None
Comment: str = None
def validate(self):
if not 0 <= self.FileVersion <= 255:
raise ValueError('FileVersion is Unsigned8')
if not 0 <= self.FileRevision <= 255:
raise ValueError('FileRevision is Unsigned8')
        if not match(r'\d\.\d', self.EDSVersion):
raise ValueError('EDSVersion type mismatch')
if self.CreationDate:
try:
                datetime.strptime(self.CreationDate, '%m-%d-%Y')
except ValueError:
raise ValueError('CreationDate format invalid')
if self.CreationTime:
if len(self.CreationTime) != 7 or self.CreationTime[5:] not in ('AM', 'PM'):
raise ValueError('CreationTime format invalid')
try:
                datetime.strptime(self.CreationTime[:5], '%I:%M')
except ValueError:
raise ValueError('CreationTime format invalid')
if self.ModificationDate:
try:
                datetime.strptime(self.ModificationDate, '%m-%d-%Y')
except ValueError:
raise ValueError('ModificationDate format invalid')
if self.ModificationTime:
if len(self.ModificationTime) != 7 or self.ModificationTime[5:] not in ('AM', 'PM'):
raise ValueError('ModificationTime format invalid')
try:
                datetime.strptime(self.ModificationTime[:5], '%I:%M')
except ValueError:
raise ValueError('ModificationTime format invalid')
class EDS:
def __init__(self):
self.FileInfo = FileInfo()
class EDSProvider:
def __init__(self, node: Node, eds: EDS = None):
self._node = node
self.eds = eds or EDS()
eds_store_variable = Variable(0x1021, 0, DatatypeEnum.DOMAIN, 'ro')
self._node.object_dictionary.add_object(eds_store_variable)
self._node.object_dictionary.set_read_callback(eds_store_variable, self.generate)
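    def generate(self, *args) -> bytes:
        # The original file ends before defining this callback, which is
        # referenced above for reads of object 0x1021. This is a minimal
        # sketch with an assumed callback signature: it serializes the
        # FileInfo section in EDS/INI style.
        lines = ['[FileInfo]']
        for key, value in vars(self.eds.FileInfo).items():
            if value is not None:
                lines.append('%s=%s' % (key, value))
        return '\n'.join(lines).encode('ascii')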
|
the-stack_0_23971 | from lxml import etree as ET
from urllib.request import urlopen
from time import sleep
import re
import csv
from threading import Thread
# ---------------------- Fields --------------------------------------
fields = ["year",
"term",
"college",
"subject",
"subject_name",
"number",
"name",
"description",
"credit_hours",
"gen_ed",
"gen_ed_name",
"crn",
"section",
"section_info",
"section_notes",
"section_attributes",
"section_capp_area",
"section_co_request",
"section_special_approval",
"part_of_term",
"start_date",
"end_date",
"meeting",
"type",
"type_name",
"start_time",
"end_time",
"days",
"room",
"building",
"instructor"]
# ---------------------- Helper functions -----------------------------
def url_open(url):
retrycount = 0
s = None
while s is None:
try:
s = urlopen(url, timeout=50)
except:
print(url)
retrycount += 1
if retrycount > 6:
raise
sleep(2)
return s
def text_or_none(xml, find, pattern=None, attrib=None):
if xml.find(find) is not None:
text = xml.find(find)
if attrib is not None:
text = text.attrib[attrib]
else:
text = text.text
if pattern is not None:
match = re.match(pattern, text)
if match is not None:
return match.group(1) or None
return None
return text or None
return None
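def _text_or_none_example():
    # Illustrative helper (not part of the original scraper): demonstrates the
    # three extraction modes of text_or_none on a hand-built element tree.
    x = ET.fromstring(
        '<c><label code="A">Calculus I</label><creditHours>4 hours.</creditHours></c>')
    assert text_or_none(x, "label") == "Calculus I"                  # element text
    assert text_or_none(x, "label", attrib="code") == "A"            # attribute value
    assert text_or_none(x, "creditHours", pattern=r"(\d+)") == "4"   # first regex group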
def build_url(*args):
url = "https://courses.illinois.edu/cisapp/explorer/schedule"
for arg in args:
url += "/" + str(arg)
return url + ".xml"
# ---------------------- Get Semester Data -----------------------------
def write_semester_csv(year, term):
row = {
"year": str(year),
"term": term.capitalize()
}
writer = csv.DictWriter(open("data/raw/{}-{}.csv".format(row["year"],row["term"]), "w+", newline='', encoding='utf-8'), fieldnames=fields)
writer.writeheader()
url = build_url(row["year"], row["term"].lower())
for subject in ET.parse(url_open(url)).iter("subject"):
row["subject"] = subject.attrib["id"]
url = build_url(row["year"], row["term"].lower(), row["subject"])
subject_info = ET.parse(url_open(url))
row["college"] = text_or_none(subject_info, "collegeCode")
row["subject_name"] = text_or_none(subject_info, "label")
print("Getting {} {} {}...".format(row["year"], row["term"], row["subject"]))
for course in subject_info.iter("course"):
row["number"] = course.attrib["id"]
url = build_url(row["year"], row["term"].lower(), row["subject"], row["number"])
course_info = ET.parse(url_open(url))
row["name"] = text_or_none(course_info, "label")
row["description"] = text_or_none(course_info, "description")
row["credit_hours"] = text_or_none(course_info, "creditHours")
row["section_attributes"] = text_or_none(course_info, "sectionDegreeAttributes")
for section in course_info.iter("section"):
row["crn"] = section.attrib["id"]
url = build_url(row["year"], row["term"].lower(), row["subject"], row["number"], row["crn"])
section_info = ET.parse(url_open(url))
row["section"] = text_or_none(section_info, "sectionNumber")
row["section_info"] = text_or_none(section_info, "sectionText")
row["section_notes"] = text_or_none(section_info, "sectionNotes")
row["section_capp_area"] = text_or_none(section_info, "sectionCappArea")
row["section_attributes"] = row["section_attributes"] or text_or_none(section_info, "sectionDegreeAttributes")
row["section_co_request"] = text_or_none(section_info, "sectionCoRequest")
row["section_special_approval"] = text_or_none(section_info, "specialApproval")
row["part_of_term"] = text_or_none(section_info, "partOfTerm")
row["start_date"] = text_or_none(section_info, "startDate")
row["end_date"] = text_or_none(section_info, "endDate")
for meeting in section_info.iter("meeting"):
row["meeting"] = meeting.attrib["id"]
row["type"] = text_or_none(meeting, "type", attrib="code")
row["type_name"] = text_or_none(meeting, "type")
row["days"] = text_or_none(meeting, "daysOfTheWeek")
row["room"] = text_or_none(meeting, "roomNumber")
row["start_time"] = text_or_none(meeting, "start")
row["end_time"] = text_or_none(meeting, "end")
row["building"] = text_or_none(meeting, "buildingName")
instructors = meeting.iter("instructor")
if next(meeting.iter("instructor"),None) is None:
instructors = [None]
for instructor in instructors:
row["instructor"] = instructor if instructor is None else instructor.text
categories = course_info.find("genEdCategories")
if categories is not None:
for cat in categories.iter("category"):
for genEd in cat.find("{http://rest.cis.illinois.edu}genEdAttributes").iter("genEdAttribute"):
row["gen_ed"] = genEd.attrib["code"]
row["gen_ed_name"] = genEd.text
writer.writerow(row)
else:
row["gen_ed"] = None
writer.writerow(row)
# Get past semesters
if __name__ == "__main__":
threads = []
for year in ET.parse(url_open("https://courses.illinois.edu/cisapp/explorer/schedule.xml")).iter("calendarYear"):
for term in ET.parse(url_open(year.attrib["href"])).iter("term"):
thread = Thread(target=write_semester_csv, args=(year.text, term.text[:-5]))
thread.start()
threads.append(thread)
for thread in threads:
thread.join() |
the-stack_0_23972 | #!/usr/bin/env python
#
# Electrum - lightweight ILCOIN client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import time
from enum import IntEnum
from typing import Tuple, TYPE_CHECKING
from PyQt5.QtCore import Qt, pyqtSignal, QThread
from PyQt5.QtWidgets import (QTreeWidget, QTreeWidgetItem, QMenu, QGridLayout, QComboBox,
QLineEdit, QDialog, QVBoxLayout, QHeaderView, QCheckBox,
QTabWidget, QWidget, QLabel)
from PyQt5.QtGui import QFontMetrics
from electrum.i18n import _
from electrum import constants, blockchain, util
from electrum.interface import ServerAddr, PREFERRED_NETWORK_PROTOCOL
from electrum.network import Network
from electrum.logging import get_logger
from .util import (Buttons, CloseButton, HelpButton, read_QIcon, char_width_in_lineedit,
PasswordLineEdit)
if TYPE_CHECKING:
from electrum.simple_config import SimpleConfig
_logger = get_logger(__name__)
protocol_names = ['TCP', 'SSL']
protocol_letters = 'ts'
class NetworkDialog(QDialog):
def __init__(self, network, config, network_updated_signal_obj):
QDialog.__init__(self)
self.setWindowTitle(_('Network'))
self.setMinimumSize(500, 500)
self.nlayout = NetworkChoiceLayout(network, config)
self.network_updated_signal_obj = network_updated_signal_obj
vbox = QVBoxLayout(self)
vbox.addLayout(self.nlayout.layout())
vbox.addLayout(Buttons(CloseButton(self)))
self.network_updated_signal_obj.network_updated_signal.connect(
self.on_update)
util.register_callback(self.on_network, ['network_updated'])
def on_network(self, event, *args):
self.network_updated_signal_obj.network_updated_signal.emit(event, args)
def on_update(self):
self.nlayout.update()
class NodesListWidget(QTreeWidget):
"""List of connected servers."""
SERVER_ADDR_ROLE = Qt.UserRole + 100
CHAIN_ID_ROLE = Qt.UserRole + 101
ITEMTYPE_ROLE = Qt.UserRole + 102
class ItemType(IntEnum):
CHAIN = 0
CONNECTED_SERVER = 1
DISCONNECTED_SERVER = 2
TOPLEVEL = 3
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent # type: NetworkChoiceLayout
self.setHeaderLabels([_('Server'), _('Height')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
item_type = item.data(0, self.ITEMTYPE_ROLE)
menu = QMenu()
if item_type == self.ItemType.CONNECTED_SERVER:
server = item.data(0, self.SERVER_ADDR_ROLE) # type: ServerAddr
menu.addAction(_("Use as server"), lambda: self.parent.follow_server(server))
elif item_type == self.ItemType.DISCONNECTED_SERVER:
server = item.data(0, self.SERVER_ADDR_ROLE) # type: ServerAddr
def func():
self.parent.server_e.setText(server.net_addr_str())
self.parent.set_server()
menu.addAction(_("Use as server"), func)
elif item_type == self.ItemType.CHAIN:
chain_id = item.data(0, self.CHAIN_ID_ROLE)
menu.addAction(_("Follow this branch"), lambda: self.parent.follow_branch(chain_id))
else:
return
menu.exec_(self.viewport().mapToGlobal(position))
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return, Qt.Key_Enter ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, *, network: Network, servers: dict, use_tor: bool):
self.clear()
# connected servers
connected_servers_item = QTreeWidgetItem([_("Connected nodes"), ''])
connected_servers_item.setData(0, self.ITEMTYPE_ROLE, self.ItemType.TOPLEVEL)
chains = network.get_blockchains()
n_chains = len(chains)
for chain_id, interfaces in chains.items():
b = blockchain.blockchains.get(chain_id)
if b is None: continue
name = b.get_name()
if n_chains > 1:
x = QTreeWidgetItem([name + '@%d'%b.get_max_forkpoint(), '%d'%b.height()])
x.setData(0, self.ITEMTYPE_ROLE, self.ItemType.CHAIN)
x.setData(0, self.CHAIN_ID_ROLE, b.get_id())
else:
x = connected_servers_item
for i in interfaces:
star = ' *' if i == network.interface else ''
item = QTreeWidgetItem([f"{i.server.net_addr_str()}" + star, '%d'%i.tip])
item.setData(0, self.ITEMTYPE_ROLE, self.ItemType.CONNECTED_SERVER)
item.setData(0, self.SERVER_ADDR_ROLE, i.server)
item.setToolTip(0, str(i.server))
x.addChild(item)
if n_chains > 1:
connected_servers_item.addChild(x)
# disconnected servers
disconnected_servers_item = QTreeWidgetItem([_("Other known servers"), ""])
disconnected_servers_item.setData(0, self.ITEMTYPE_ROLE, self.ItemType.TOPLEVEL)
connected_hosts = set([iface.host for ifaces in chains.values() for iface in ifaces])
protocol = PREFERRED_NETWORK_PROTOCOL
for _host, d in sorted(servers.items()):
if _host in connected_hosts:
continue
if _host.endswith('.onion') and not use_tor:
continue
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
item = QTreeWidgetItem([server.net_addr_str(), ""])
item.setData(0, self.ITEMTYPE_ROLE, self.ItemType.DISCONNECTED_SERVER)
item.setData(0, self.SERVER_ADDR_ROLE, server)
disconnected_servers_item.addChild(item)
self.addTopLevelItem(connected_servers_item)
self.addTopLevelItem(disconnected_servers_item)
connected_servers_item.setExpanded(True)
for i in range(connected_servers_item.childCount()):
connected_servers_item.child(i).setExpanded(True)
disconnected_servers_item.setExpanded(True)
# headers
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
super().update()
class NetworkChoiceLayout(object):
def __init__(self, network: Network, config: 'SimpleConfig', wizard=False):
self.network = network
self.config = config
self.tor_proxy = None
self.tabs = tabs = QTabWidget()
proxy_tab = QWidget()
blockchain_tab = QWidget()
tabs.addTab(blockchain_tab, _('Overview'))
tabs.addTab(proxy_tab, _('Proxy'))
fixed_width_hostname = 24 * char_width_in_lineedit()
fixed_width_port = 6 * char_width_in_lineedit()
# Proxy tab
grid = QGridLayout(proxy_tab)
grid.setSpacing(8)
# proxy setting
self.proxy_cb = QCheckBox(_('Use proxy'))
self.proxy_cb.clicked.connect(self.check_disable_proxy)
self.proxy_cb.clicked.connect(self.set_proxy)
self.proxy_mode = QComboBox()
self.proxy_mode.addItems(['SOCKS4', 'SOCKS5'])
self.proxy_host = QLineEdit()
self.proxy_host.setFixedWidth(fixed_width_hostname)
self.proxy_port = QLineEdit()
self.proxy_port.setFixedWidth(fixed_width_port)
self.proxy_user = QLineEdit()
self.proxy_user.setPlaceholderText(_("Proxy user"))
self.proxy_password = PasswordLineEdit()
self.proxy_password.setPlaceholderText(_("Password"))
self.proxy_password.setFixedWidth(fixed_width_port)
self.proxy_mode.currentIndexChanged.connect(self.set_proxy)
self.proxy_host.editingFinished.connect(self.set_proxy)
self.proxy_port.editingFinished.connect(self.set_proxy)
self.proxy_user.editingFinished.connect(self.set_proxy)
self.proxy_password.editingFinished.connect(self.set_proxy)
self.proxy_mode.currentIndexChanged.connect(self.proxy_settings_changed)
self.proxy_host.textEdited.connect(self.proxy_settings_changed)
self.proxy_port.textEdited.connect(self.proxy_settings_changed)
self.proxy_user.textEdited.connect(self.proxy_settings_changed)
self.proxy_password.textEdited.connect(self.proxy_settings_changed)
self.tor_cb = QCheckBox(_("Use Tor Proxy"))
self.tor_cb.setIcon(read_QIcon("tor_logo.png"))
self.tor_cb.hide()
self.tor_cb.clicked.connect(self.use_tor_proxy)
grid.addWidget(self.tor_cb, 1, 0, 1, 3)
grid.addWidget(self.proxy_cb, 2, 0, 1, 3)
grid.addWidget(HelpButton(_('Proxy settings apply to all connections: with Electrum servers, but also with third-party services.')), 2, 4)
grid.addWidget(self.proxy_mode, 4, 1)
grid.addWidget(self.proxy_host, 4, 2)
grid.addWidget(self.proxy_port, 4, 3)
grid.addWidget(self.proxy_user, 5, 2)
grid.addWidget(self.proxy_password, 5, 3)
grid.setRowStretch(7, 1)
# Blockchain Tab
grid = QGridLayout(blockchain_tab)
msg = ' '.join([
_("Electrum connects to several nodes in order to download block headers and find out the longest blockchain."),
_("This blockchain is used to verify the transactions sent by your transaction server.")
])
self.status_label = QLabel('')
grid.addWidget(QLabel(_('Status') + ':'), 0, 0)
grid.addWidget(self.status_label, 0, 1, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
self.autoconnect_cb = QCheckBox(_('Select server automatically'))
self.autoconnect_cb.setEnabled(self.config.is_modifiable('auto_connect'))
self.autoconnect_cb.clicked.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.update)
msg = ' '.join([
_("If auto-connect is enabled, Electrum will always use a server that is on the longest blockchain."),
_("If it is disabled, you have to choose a server you want to use. Electrum will warn you if your server is lagging.")
])
grid.addWidget(self.autoconnect_cb, 1, 0, 1, 3)
grid.addWidget(HelpButton(msg), 1, 4)
self.server_e = QLineEdit()
self.server_e.setFixedWidth(fixed_width_hostname + fixed_width_port)
self.server_e.editingFinished.connect(self.set_server)
msg = _("Electrum sends your wallet addresses to a single server, in order to receive your transaction history.")
grid.addWidget(QLabel(_('Server') + ':'), 2, 0)
grid.addWidget(self.server_e, 2, 1, 1, 3)
grid.addWidget(HelpButton(msg), 2, 4)
self.height_label = QLabel('')
msg = _('This is the height of your local copy of the blockchain.')
grid.addWidget(QLabel(_('Blockchain') + ':'), 3, 0)
grid.addWidget(self.height_label, 3, 1)
grid.addWidget(HelpButton(msg), 3, 4)
self.split_label = QLabel('')
grid.addWidget(self.split_label, 4, 0, 1, 3)
self.nodes_list_widget = NodesListWidget(self)
grid.addWidget(self.nodes_list_widget, 6, 0, 1, 5)
vbox = QVBoxLayout()
vbox.addWidget(tabs)
self.layout_ = vbox
# tor detector
self.td = td = TorDetector()
td.found_proxy.connect(self.suggest_proxy)
td.start()
self.fill_in_proxy_settings()
self.update()
def check_disable_proxy(self, b):
if not self.config.is_modifiable('proxy'):
b = False
for w in [self.proxy_mode, self.proxy_host, self.proxy_port, self.proxy_user, self.proxy_password]:
w.setEnabled(b)
def enable_set_server(self):
if self.config.is_modifiable('server'):
enabled = not self.autoconnect_cb.isChecked()
self.server_e.setEnabled(enabled)
else:
for w in [self.autoconnect_cb, self.server_e, self.nodes_list_widget]:
w.setEnabled(False)
def update(self):
net_params = self.network.get_parameters()
server = net_params.server
auto_connect = net_params.auto_connect
if not self.server_e.hasFocus():
self.server_e.setText(server.to_friendly_name())
self.autoconnect_cb.setChecked(auto_connect)
height_str = "%d "%(self.network.get_local_height()) + _('blocks')
self.height_label.setText(height_str)
n = len(self.network.get_interfaces())
status = _("Connected to {0} nodes.").format(n) if n > 1 else _("Connected to {0} node.").format(n) if n == 1 else _("Not connected")
self.status_label.setText(status)
chains = self.network.get_blockchains()
if len(chains) > 1:
chain = self.network.blockchain()
forkpoint = chain.get_max_forkpoint()
name = chain.get_name()
msg = _('Chain split detected at block {0}').format(forkpoint) + '\n'
msg += (_('You are following branch') if auto_connect else _('Your server is on branch'))+ ' ' + name
msg += ' (%d %s)' % (chain.get_branch_size(), _('blocks'))
else:
msg = ''
self.split_label.setText(msg)
self.nodes_list_widget.update(network=self.network,
servers=self.network.get_servers(),
use_tor=self.tor_cb.isChecked())
self.enable_set_server()
def fill_in_proxy_settings(self):
proxy_config = self.network.get_parameters().proxy
if not proxy_config:
proxy_config = {"mode": "none", "host": "localhost", "port": "9050"}
b = proxy_config.get('mode') != "none"
self.check_disable_proxy(b)
if b:
self.proxy_cb.setChecked(True)
self.proxy_mode.setCurrentIndex(
self.proxy_mode.findText(str(proxy_config.get("mode").upper())))
self.proxy_host.setText(proxy_config.get("host"))
self.proxy_port.setText(proxy_config.get("port"))
self.proxy_user.setText(proxy_config.get("user", ""))
self.proxy_password.setText(proxy_config.get("password", ""))
def layout(self):
return self.layout_
def follow_branch(self, chain_id):
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
self.update()
def follow_server(self, server: ServerAddr):
self.network.run_from_another_thread(self.network.follow_chain_given_server(server))
self.update()
def accept(self):
pass
def set_server(self):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(str(self.server_e.text()))
if not server: raise Exception("failed to parse")
except Exception:
return
net_params = net_params._replace(server=server,
auto_connect=self.autoconnect_cb.isChecked())
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def set_proxy(self):
net_params = self.network.get_parameters()
if self.proxy_cb.isChecked():
proxy = { 'mode':str(self.proxy_mode.currentText()).lower(),
'host':str(self.proxy_host.text()),
'port':str(self.proxy_port.text()),
'user':str(self.proxy_user.text()),
'password':str(self.proxy_password.text())}
else:
proxy = None
self.tor_cb.setChecked(False)
net_params = net_params._replace(proxy=proxy)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def suggest_proxy(self, found_proxy):
if found_proxy is None:
self.tor_cb.hide()
return
self.tor_proxy = found_proxy
self.tor_cb.setText("Use Tor proxy at port " + str(found_proxy[1]))
if (self.proxy_cb.isChecked()
and self.proxy_mode.currentIndex() == self.proxy_mode.findText('SOCKS5')
and self.proxy_host.text() == "127.0.0.1"
and self.proxy_port.text() == str(found_proxy[1])):
self.tor_cb.setChecked(True)
self.tor_cb.show()
def use_tor_proxy(self, use_it):
if not use_it:
self.proxy_cb.setChecked(False)
else:
socks5_mode_index = self.proxy_mode.findText('SOCKS5')
if socks5_mode_index == -1:
_logger.info("can't find proxy_mode 'SOCKS5'")
return
self.proxy_mode.setCurrentIndex(socks5_mode_index)
self.proxy_host.setText("127.0.0.1")
self.proxy_port.setText(str(self.tor_proxy[1]))
self.proxy_user.setText("")
self.proxy_password.setText("")
self.tor_cb.setChecked(True)
self.proxy_cb.setChecked(True)
self.check_disable_proxy(use_it)
self.set_proxy()
def proxy_settings_changed(self):
self.tor_cb.setChecked(False)
class TorDetector(QThread):
found_proxy = pyqtSignal(object)
def __init__(self):
QThread.__init__(self)
def run(self):
# Probable ports for Tor to listen at
ports = [9050, 9150]
while True:
for p in ports:
net_addr = ("127.0.0.1", p)
if TorDetector.is_tor_port(net_addr):
self.found_proxy.emit(net_addr)
break
else:
self.found_proxy.emit(None)
time.sleep(10)
@staticmethod
def is_tor_port(net_addr: Tuple[str, int]) -> bool:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(0.1)
s.connect(net_addr)
# Tor responds uniquely to HTTP-like requests
s.send(b"GET\n")
if b"Tor is not an HTTP Proxy" in s.recv(1024):
return True
except socket.error:
pass
return False
|
the-stack_0_23973 | import sys
sys.path.append('../Grupo1/Instrucciones')
sys.path.append('../Grupo1/Utils')
sys.path.append('../Grupo1/Expresiones')
from instruccion import *
from Error import *
from Primitivo import *
class Logicas(Instruccion):
def __init__(self, leftOperator, rightOperator, sign):
self.leftOperator = leftOperator
self.rightOperator = rightOperator
self.sign = sign
def execute(self, data, valoresTabla):
        # Evaluate both operands; node types differ in their execute() signatures, so fall back through the known variants
try:
left = self.leftOperator.execute()
except:
try:
left = self.leftOperator.executeInsert(data, valoresTabla)
except:
left = self.leftOperator.execute(data, valoresTabla)
try:
right = self.rightOperator.execute()
except:
try:
right = self.rightOperator.executeInsert(data, valoresTabla)
except:
right = self.rightOperator.execute(data, valoresTabla)
#checking returns of both arguments in case of error
if isinstance(left, Error) :
return left
if isinstance(right, Error) :
return right
#execution of logic conditions
if self.sign == "and" :
return left and right
        else:
            # 'or'
            return left or right
def repr(self):
return str(self.__dict__)
|
the-stack_0_23974 | import warnings
import mmcv
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug(object):
"""Test-time augmentation with multiple scales and flipping.
    An example configuration is as follows:
.. code-block::
img_scale=(2048, 1024),
img_ratios=[0.5, 1.0],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
    After MultiScaleFlipAug with the above configuration, the results are
    wrapped into lists of the same length, as follows:
.. code-block::
dict(
img=[...],
img_shape=[...],
scale=[(1024, 512), (1024, 512), (2048, 1024), (2048, 1024)]
flip=[False, True, False, True]
...
)
Args:
transforms (list[dict]): Transforms to apply in each augmentation.
img_scale (tuple | list[tuple]): Images scales for resizing.
img_ratios (float | list[float]): Image ratios for resizing
flip (bool): Whether apply flip augmentation. Default: False.
flip_direction (str | list[str]): Flip augmentation directions,
options are "horizontal" and "vertical". If flip_direction is list,
multiple flip augmentations will be applied.
It has no effect when flip == False. Default: "horizontal".
"""
def __init__(self,
transforms,
img_scale,
img_ratios=None,
flip=False,
flip_direction='horizontal'):
self.transforms = Compose(transforms)
if img_ratios is not None:
# mode 1: given a scale and a range of image ratio
img_ratios = img_ratios if isinstance(img_ratios,
list) else [img_ratios]
assert mmcv.is_list_of(img_ratios, float)
assert isinstance(img_scale, tuple) and len(img_scale) == 2
self.img_scale = [(int(img_scale[0] * ratio),
int(img_scale[1] * ratio))
for ratio in img_ratios]
else:
# mode 2: given multiple scales
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
assert mmcv.is_list_of(self.img_scale, tuple)
self.flip = flip
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str)
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip
and not any([t['type'] == 'RandomFlip' for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
"""Call function to apply test time augment transforms on results.
Args:
results (dict): Result dict contains the data to transform.
Returns:
dict[str: list]: The augmented data, where each value is wrapped
into a list.
"""
aug_data = []
flip_aug = [False, True] if self.flip else [False]
for scale in self.img_scale:
for flip in flip_aug:
for direction in self.flip_direction:
_results = results.copy()
_results['scale'] = scale
_results['flip'] = flip
_results['flip_direction'] = direction
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip})'
repr_str += f'flip_direction={self.flip_direction}'
return repr_str
@PIPELINES.register_module()
class SingleScaleFourFlipAug(MultiScaleFlipAug):
def __init__(self,
transforms,
img_scale,
flip_lr=True, flip_ud=True):
super().__init__(transforms, img_scale=img_scale, img_ratios=[1.], flip=flip_lr)
self.flip_ud = flip_ud
def __call__(self, results):
aug_data = []
flip_lrs = [False, True] if self.flip else [False]
flip_uds = [False, True] if self.flip_ud else [False]
for flip_lr in flip_lrs:
for flip_ud in flip_uds:
for direction in self.flip_direction:
_results = results.copy()
_results['scale'] = self.img_scale[0]
_results['flip'] = flip_lr
_results['flip_direction'] = direction
_results['flip_ud'] = flip_ud
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
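if __name__ == '__main__':
    # Minimal sketch (illustrative): with an empty transform list,
    # MultiScaleFlipAug simply fans one result dict out over every requested
    # scale/flip combination.
    aug = MultiScaleFlipAug(transforms=[], img_scale=(2048, 1024),
                            img_ratios=[0.5, 1.0], flip=False)
    out = aug(dict(img='dummy'))
    print(out['scale'])  # [(1024, 512), (2048, 1024)]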
|
the-stack_0_23979 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class FrontResourcesReqVo(object):
def __init__(self, serviceCodes, resourceIds=None, tagFilters=None, showTagStatus=None, pageSize=None, currentPage=None):
"""
        :param serviceCodes: List of product-line names; currently only resources
          from a single product line can be queried per request.
          Product-line names supported by the tag system:
          - vm disk sqlserver es mongodb ip
          - memcached redis drds rds database db_ro
          - percona percona_ro mariadb mariadb_ro pg cdn
          - nativecontainer pod zfs jqs kubernetesNodegroup jcq
        :param resourceIds: (Optional) List of resource IDs
        :param tagFilters: (Optional) List of tag filters
        :param showTagStatus: (Optional) Controls which tags are shown; defaults to 0
          0: show only regular user tags
          1: show both system tags and regular user tags
        :param pageSize: (Optional) Records per page; defaults to 20, capped at 500 per page
        :param currentPage: (Optional) Current page number; defaults to the first page
"""
self.serviceCodes = serviceCodes
self.resourceIds = resourceIds
self.tagFilters = tagFilters
self.showTagStatus = showTagStatus
self.pageSize = pageSize
self.currentPage = currentPage
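if __name__ == '__main__':
    # Usage sketch (illustrative, not part of the generated SDK): request the
    # first page of "vm" resources; all field values here are examples only.
    req = FrontResourcesReqVo(serviceCodes=['vm'], pageSize=20, currentPage=1)
    print(req.__dict__)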
|
the-stack_0_23982 | import re
n=int(input())
for k in range(0,n):
data=input()
findValidData=re.compile(r"(?P<Boss>\|[A-Z]{4,}\|):(?P<Title>#[A-Za-z]+\s[A-Za-z]+#)")
occurrences=findValidData.findall(data)
if len(occurrences)==0:
print("Access denied!")
else:
bossName=occurrences[0][0].split("|")
bossName=bossName[1]
title=occurrences[0][1].split("#")
title=title[1]
Strength=len(bossName)
        Armor=len(title)
print(f"{bossName}, The {title}\n>> Strength: {Strength}\n>> Armor: {Armor}")
|
the-stack_0_23983 | # Licensed under the BSD 3-Clause License
# Copyright (C) 2021 GeospaceLab (geospacelab)
# Author: Lei Cai, Space Physics and Astronomy, University of Oulu
__author__ = "Lei Cai"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "[email protected]"
__docformat__ = "reStructureText"
import geospacelab.visualization.mpl as mpl
import geospacelab.datahub as datahub
from geospacelab.visualization.mpl.geomap.geopanels import PolarMapPanel
default_layout_config = {
'left': 0.15,
'right': 0.8,
'bottom': 0.15,
'top': 0.88,
'hspace': 0.1,
'wspace': 0.1
}
default_figure_config = {
'figsize': (12, 12), # (width, height)
'dpi': 100,
}
class GeoDashboard(mpl.Dashboard):
def __init__(self, **kwargs):
figure = kwargs.pop('figure', 'new')
figure_config = kwargs.pop('figure_config', default_figure_config)
super().__init__(visual='on', figure_config=figure_config, figure=figure, **kwargs)
def set_layout(self, num_rows=None, num_cols=None, left=None, right=None, bottom=None, top=None,
hspace=None, wspace=None, **kwargs):
if left is None:
left = default_layout_config['left']
if right is None:
right = default_layout_config['right']
if bottom is None:
bottom = default_layout_config['bottom']
if top is None:
top = default_layout_config['top']
if hspace is None:
hspace = default_layout_config['hspace']
        if wspace is None:
wspace = default_layout_config['wspace']
super().set_layout(num_rows=num_rows, num_cols=num_cols, left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace, **kwargs)
def add_polar_map(self, row_ind, col_ind, label=None, cs='GEO', style='lon-fixed', pole='N',
ut=None, lon_c=None, lst_c=None, mlt_c=None, mlon_c=None, boundary_lat=30.,
boundary_style='circle', mirror_south=False,
proj_type='Stereographic', **kwargs):
"""
:param pole:
:param row_ind: the row
:param style: 'lon-fixed', 'lst-fixed', 'mlt-fixed' or 'mlon-fixed'
:return:
"""
panel = super().add_panel(row_ind=row_ind, col_ind=col_ind, panel_class=PolarMapPanel,
label=label, cs=cs, style=style, pole=pole,
ut=ut, lon_c=lon_c, lst_c=lst_c, mlt_c=mlt_c, mlon_c=mlon_c, boundary_lat=boundary_lat,
boundary_style=boundary_style,
mirror_south=mirror_south,
proj_type=proj_type, **kwargs)
return panel
|
the-stack_0_23984 | # -*- coding: utf-8 -*-
import os
import re
import json
import datetime as dt
import logging
from html import unescape
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.shortcuts import render # render_to_response
# from django.template.context_processors import csrf
from django.views.decorators.csrf import csrf_protect
# # from django.conf import settings as SETTINGS
# from django.contrib.auth.views import redirect_to_login
from .forms import UploadFileForm
from .utils import handle_uploaded_file, handle_download_file
from .models import ProjectID, TextBlob
LOGGER = logging.getLogger(__name__)
#
# View methods
#
def view(request, project_id_array=None, reviewed=True):
blobs = TextBlob.objects
if project_id_array:
blobs = blobs.filter(project_id__in=project_id_array)
if reviewed:
blobs = blobs.filter(reviewed=True)
return render(request, 'html_checker/viewer.html',
{'blobs': blobs,}
)
def add_note(request):
    pid = request.GET.get('id', -1)
    # HTMLParser.unescape() was removed in Python 3.9; use html.unescape()
    note = unescape(request.GET.get('note', ''))
try:
pid = int(pid)
except ValueError:
pid = -1
tb = TextBlob.objects.filter(pk=pid).first()
if not tb:
return HttpResponse('{"success":false, "id": '+pid+'}',
content_type="application/json")
tb.notes = note
tb.save()
return HttpResponse(json.dumps({'success':True, 'id':pid, 'note':note}),
content_type="application/json")
@csrf_protect
def upload(request):
"""
Upload file to add data to database.
Can be json file of format:
[{'cik': 20, 'accession': 'xxx',...}, ...]
or an excel file, with the first row being the field names:
cik | accession | text
-----+-------------+-------------------------------------------------------------
20 | xxxxxxxxxxx | It was the best of websites, it was the worst of websites.
Don't add any fields in your json/excel that aren't in the database.
This isn't robust, after all.
"""
if request.method == 'POST':
import pandas as pd
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
project_name = form.cleaned_data['project_name']
pid, created = ProjectID.objects.get_or_create(project=project_name)
df = handle_uploaded_file(form.cleaned_data['upload_type'],
request.FILES['upload_file'])
LOGGER.info("UPLOAD: {} project: {} with {} rows uploaded.".format(
"New" if created else "Old", pid, len(df)))
# Format dates to be datetime objects
for c in ('filedate', 'datadate'):
if c in df:
df[c] = pd.to_datetime(df[c])
TextBlob.objects.bulk_create(
[TextBlob(project=pid, **row) for row in df.to_dict('records')])
# next_url = reverse('html_checker:index')
next_url = '/admin/html_checker/projectid/{}/'.format(pid.id)
return HttpResponseRedirect(next_url)
else:
form = UploadFileForm()
c = {}
# c.update(csrf(request))
c['form'] = form
c['next'] = 'upload'
return render(request, 'html_checker/upload.html', c)
def download(request):
blobs = (TextBlob.objects
.filter(reviewed=1))
resp = HttpResponse(content_type='application/vnd.ms-excel')
resp['Content-Disposition'] = 'attachment; filename="time_series_packages.tsv"'
resp['Content-Disposition'] = 'attachment; filename=ra_website_data.xlsx'
resp.write(handle_download_file(blobs))
return resp
|
the-stack_0_23987 |
import os
import glob
import json
import argparse
from utils.utils import calc_mean_score, save_json
from handlers.model_builder import Nima
from handlers.data_generator import TestDataGenerator
def image_file_to_json(img_path):
img_dir = os.path.dirname(img_path)
    # strip the 4-character extension (e.g. '.jpg') to get the image id
    img_id = os.path.basename(img_path)[:-4]
return img_dir, [{'image_id': img_id}]
def image_dir_to_json(img_dir, img_type='jpg'):
img_paths = glob.glob(os.path.join(img_dir, '*.'+img_type))
samples = []
for img_path in img_paths:
        # strip the 4-character extension (e.g. '.jpg') to get the image id
        img_id = os.path.basename(img_path)[:-4]
samples.append({'image_id': img_id})
return samples
def predict(model, data_generator):
#return model.predict_generator(data_generator, workers=4, use_multiprocessing=True, verbose=1)
return model.predict_generator(data_generator, use_multiprocessing=False, verbose=1)
def main(base_model_name, weights_file, image_source, predictions_file, img_format='jpg'):
# load samples
if os.path.isfile(image_source):
image_dir, samples = image_file_to_json(image_source)
else:
image_dir = image_source
        samples = image_dir_to_json(image_dir, img_type=img_format)
print('base_model_name: ', base_model_name)
print('weights_file: ', weights_file)
print('image_source: ', image_source)
print('image_dir: ', image_dir)
# build model and load weights
nima = Nima(base_model_name, weights=None)
nima.build()
nima.nima_model.load_weights(weights_file)
# initialize data generator
data_generator = TestDataGenerator(samples, image_dir, 64, 10, nima.preprocessing_function(),
img_format=img_format)
# get predictions
predictions = predict(nima.nima_model, data_generator)
# calc mean scores and add to samples
for i, sample in enumerate(samples):
sample['mean_score_prediction'] = calc_mean_score(predictions[i])
os.rename(os.path.join(image_dir,sample['image_id']+'.jpg'),\
os.path.join(image_dir, \
str(sample['mean_score_prediction'])+sample['image_id']+'.jpg'))
print(json.dumps(samples, indent=2))
if predictions_file is not None:
save_json(samples, predictions_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--base-model-name', help='CNN base model name', required=False, \
default='MobileNet' )
parser.add_argument('-w', '--weights-file', help='path of weights file', required=False, \
# default='../models/MobileNet/weights_mobilenet_technical_0.11.hdf5')
default='../models/MobileNet/weights_mobilenet_aesthetic_0.07.hdf5')
parser.add_argument('-is', '--image-source', help='image directory or file', required=True)
parser.add_argument('-pf', '--predictions-file', help='file with predictions', required=False, default=None)
args = parser.parse_args()
main(**args.__dict__)
|
the-stack_0_23990 | """
.. _tut-mayer:
Mayer Wave Parametrisation
==========================
.. sidebar:: Cite the FOOOF authors
This tutorial is only possible due to the excellent and generous work of
the FOOOF authors. Please visit their site https://fooof-tools.github.io/fooof.
Also be sure to cite their various papers:
Donoghue, Thomas, et al. "Parameterizing neural power spectra into periodic and aperiodic components." Nature neuroscience 23.12 (2020): 1655-1665.
Donoghue, Thomas, Julio Dominguez, and Bradley Voytek. "Electrophysiological frequency band ratio measures conflate periodic and aperiodic neural activity." Eneuro 7.6 (2020).
Mayer waves are spontaneous oscillations in arterial blood pressure with a
frequency of ~0.1 Hz (Ghali and Ghali, 2020; Julien, 2006; Yucel, 2016).
Mayer waves are not easily removed from hemodynamic signatures of brain
activity as they tend to occur on a time course often confounded with
the frequency of a sensory task, for example, and/or the
cortical hemodynamic response to that task.
This example demonstrates how to use the
Fitting Oscillations & One Over F (FOOOF)
:footcite:`donoghue2020parameterizing`
method to quanitfy Mayer wave parameters in fNIRS data.
This is based on the description provided in
:footcite:`luke2021characterization`.
This tutorial is heavily based on the tutorials provided by the FOOOF
authors over at https://fooof-tools.github.io/fooof.
You should read their excellent documentation. Their work should be considered
the primary resource, and this is just an example of how to apply it to fNIRS
data for the purpose of extracting Mayer waves oscillation parameters.
.. contents:: Page contents
:local:
:depth: 2
"""
# Authors: Robert Luke <[email protected]>
#
# License: BSD (3-clause)
import os
import mne
import numpy as np
import matplotlib.pyplot as plt
from mne.preprocessing.nirs import optical_density, beer_lambert_law
from mne.time_frequency import psd_welch
from mne_nirs.channels import get_long_channels
from mne_nirs.preprocessing import quantify_mayer_fooof
from fooof import FOOOF
# %%
# Import and preprocess data
# --------------------------
#
# We read in the data and convert to haemoglobin concentration.
fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
raw = mne.io.read_raw_nirx(fnirs_raw_dir, verbose=True).load_data()
raw = optical_density(raw)
raw.resample(1.5)
raw = beer_lambert_law(raw, ppf=0.1)
raw = raw.pick(picks="hbo")
raw = get_long_channels(raw, min_dist=0.025, max_dist=0.045)
raw
# %%
# Process data with FOOOF
# -----------------------
#
# Next we estimate the power spectral density of the data and pass this to
# the FOOOF algorithm.
#
# I recommend using the FOOOF algorithm as provided by the authors rather
# than reimplementation or custom plotting etc. Their code is of excellent
# quality, well maintained, thoroughly documented, and they have considered
# many edge cases.
#
# Below we plot the spectrum of the data, the FOOOF fit of oscillations,
# and aperiodic component. Note the bump at 0.1 Hz that reflects the Mayer
# wave activity.
#
# Note that the activity is not a perfect peak at 0.1 Hz, but is spread
# across neighbouring frequencies. Additionally, the peak does not occur
# at exactly 0.1 Hz, but instead seems to peak at approximately 0.09 Hz.
# The shaded area illustrates the oscillation fitted by the FOOOF algorithm,
# it matches well to the data.
def scale_up_spectra(spectra, freqs):
"""
FOOOF requires the frequency values to be higher than the fNIRS data
permits, so we scale the values up by 10 here, and then will scale
the frequency values down by 10 later.
"""
freqs = freqs * 10
return spectra, freqs
# Prepare data for FOOOF
spectra, freqs = psd_welch(raw, fmin=0.001, fmax=1, tmin=0, tmax=None, n_overlap=300, n_fft=600)
spectra, freqs = scale_up_spectra(spectra, freqs)
# Specify the model, note that frequency values here are times 10
fm = FOOOF(peak_width_limits=(0.5, 12.0))
# Set the frequency range to fit the model, again these are times 10
freq_range = [0.001, 7]
fm.fit(freqs, np.mean(spectra, axis=0), freq_range)
fig, axs = plt.subplots(1, 1, figsize=(10, 5))
fm.plot(plot_peaks='shade', data_kwargs={'color': 'orange'}, ax=axs)
# Correct for x10 scaling above
plt.xticks([0, 1, 2, 3, 4, 5, 6], [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]);
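
# %%
# As a quick sanity check, we can read the fitted peak centre frequencies back
# from the model and undo the x10 scaling applied above. The Mayer component
# should appear near 0.1 Hz.

peak_freqs_hz = fm.get_params('peak_params', 'CF') / 10
print(peak_freqs_hz)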
# %%
# Use MNE-NIRS to quantify Mayer wave oscillation
# -----------------------------------------------
#
# MNE-NIRS provides a convenient function to estimate the Mayer wave
# parameters that takes care of the frequency scaling and selects the component
# most likely associated with the Mayer wave. It returns this data in a pandas
# dataframe for your convenience.
# It uses the FOOOF algorithm under the hood, so ensure you cite the original
# authors if you use this function.
quantify_mayer_fooof(raw.pick("hbo"), extra_df_fields={"Study": "Online tutorial"})
# %%
# Conclusion
# ----------
#
# We have demonstrated how to use the FOOOF algorithm for quantifying Mayer
# wave parameters, and highlighted the `quantify_mayer_fooof` for conveniently
# applying this analysis to fNIRS data with MNE-NIRS.
#
# An example measurement illustrated what the presence of a Mayer wave
# looks like in a power spectral density. The measurement also showed that
# the Mayer wave is not a perfect sinusoid, as evidenced by its broad
# spectral content. Further, the example demonstrated that the Mayer wave is
# not always precisely locked to 0.1 Hz: both visual inspection and FOOOF
# quantification indicate a centre frequency of approximately 0.09 Hz.
#
# See the article Luke (2021) :footcite:`luke2021characterization` for further
# details on this analysis approach, and normative data from over 300 fNIRS
# measurements. This article also demonstrates that using short-channel
# systemic component correction algorithms can reduce the Mayer wave component
# in the signal (see also Yucel 2016).
# See both the
# :ref:`GLM tutorial <tut-fnirs-hrf>`
# and
# :ref:`signal enhancement tutorial <tut-fnirs-enhance>`
# for how to use short channels in either a GLM or averaging analysis with MNE-NIRS.
# %%
# Bibliography
# -----------------------------------------------
#
# .. footbibliography::
|
the-stack_0_23991 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import binascii
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING
import eth_abi
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar, QSplitter)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS, b58_address_to_hash160, Token, opcodes, \
TYPE_SCRIPT, is_hash160, hash_160, eth_abi_encode
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, InvoiceError)
from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from electrum.lnutil import PaymentFailure, SENT, RECEIVED
from electrum.transaction import Transaction, TxOutput, contract_script, is_opcall_script, is_opcreate_script
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.util import PR_PAID, PR_UNPAID, PR_INFLIGHT, PR_FAILED
from electrum.util import pr_expiration_values
from electrum.plugins.trezor.trezor import TrezorKeyStore
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .token_dialog import TokenAddDialog, TokenInfoDialog, TokenSendDialog
from .smart_contract_dialog import ContractCreateDialog, ContractEditDialog, ContractFuncDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
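# A minimal usage sketch (illustrative only, not part of the original code):
#
#   btn = StatusBarButton(read_QIcon("status_connected.png"), _("Network"),
#                         lambda: gui_object.show_network_dialog(window))
#   window.statusBar().addPermanentWidget(btn)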
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.send_tab_is_onchain = False
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab(wallet)
self.tokens_tab = self.create_tokens_tab()
self.smart_contract_tab = self.create_smart_contract_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
tabs.addTab(self.tokens_tab, read_QIcon("tab_contacts.png"), _('Tokens'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
if self.wallet.has_lightning():
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
add_optional_tab(tabs, self.smart_contract_tab, read_QIcon("tab_console.png"), _('Smart Contract'), 'contract')
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'invoice_status', 'request_status', 'on_token']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Tachacoin Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Tachacoin Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_token(self):
self.token_hist_model.refresh('fx_token')
self.token_hist_list.update()
self.token_balance_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
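            # (optional tabs keep their canonical relative order by comparing
            # the tab_pos values assigned in add_optional_tab)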
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'on_token':
self.on_fx_token()
elif event == 'channels_updated':
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.lnworker:
wallet.lnworker.on_channels_updated()
self.need_update.set()
        # Once the GUI has been initialized, check whether we want to announce
        # something, since the callback has been called before the GUI was
        # initialized.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Tachacoin Electrum Testnet" if constants.net.TESTNET else "Tachacoin Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend TACHACOINs with it."),
_("Make sure you own the seed phrase or the private keys, before you request TACHACOINs to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Tachacoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Tachacoin Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
try:
            addr_type, __ = b58_address_to_hash160(self.wallet.get_addresses()[0])
except:
addr_type = constants.net.SEGWIT_HRP
if not isinstance(self.wallet.keystore, TrezorKeyStore) and addr_type == constants.net.ADDRTYPE_P2PKH and not self.wallet.is_watching_only():
token_menu = wallet_menu.addMenu(_("&Token"))
token_menu.addAction(_("Add Token"), lambda: self.token_add_dialog())
            smart_contract_menu = wallet_menu.addMenu(_("&Smart Contract"))
            smart_contract_menu.addAction(_("Add Contract"), lambda: self.contract_add_dialog())
            smart_contract_menu.addAction(_("Create Contract"), lambda: self.contract_create_dialog())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
if self.wallet.has_lightning():
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
add_toggle_action(view_menu, self.smart_contract_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in macOS using this as work around
tools_menu.addAction(_("Tachacoin Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
if self.wallet.has_lightning():
tools_menu.addAction(_("&Lightning"), self.gui_object.show_lightning_dialog)
tools_menu.addAction(_("&Watchtower"), self.gui_object.show_watchtower_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://github.com/tachacoin/tachacoin-electrum/"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('tachacoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Tachacoin Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("This software is based on Electrum to support Tachacoin.") + " " +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Tachacoin Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Tachacoin Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Tachacoin Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False, num_zeros=None, decimal_point=None):
if num_zeros is None:
num_zeros = self.num_zeros
if decimal_point is None:
decimal_point = self.decimal_point
return format_satoshis(x, num_zeros, decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
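        # e.g. fee_rate=400000 sat/kB -> 400000/1000 = 400 sat/byte displayed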
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
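        # Two-way binding between the coin and fiat amount edits: whichever
        # field was edited last drives the other, and the "follows" flag
        # guards against recursive textChanged updates.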
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.lnworker:
l = self.wallet.lnworker.get_balance()
text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
icon = read_QIcon("status_disconnected.png")
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
elif self.network.downloading_headers:
text = _("Downloading headers...")
icon = read_QIcon("status_waiting.png")
else:
text = _("Not connected")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.token_balance_list.update()
self.token_hist_model.refresh('update_tabs')
self.token_hist_list.update()
self.smart_contract_list.update()
self.update_completions()
def create_channels_tab(self, wallet):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, *, invoice=None, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, invoice=invoice, desc=tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', 3600)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Tachacoin addresses.'),
_('The tachacoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('On-chain'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=230)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
self.receive_widgets = QTabWidget()
self.receive_widgets.addTab(self.receive_qr, 'QR Code')
self.receive_widgets.addTab(self.receive_address_e, 'Text')
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(self.receive_widgets)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_request(self, key):
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', 3600)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
self.address_list.update()
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
def create_bitcoin_request(self, amount, message, expiration):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, title, content):
self.app.clipboard().setText(content)
self.show_message(_(f"{title} copied to clipboard:\n\n{content}"))
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def clear_receive_tab(self):
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
def update_receive_qr(self):
uri = str(self.receive_address_e.text())
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
# note: 'addr' could be ln invoice or BIP21 URI
try:
uri = util.parse_URI(addr)
except InvalidBitcoinURI:
pass
else:
addr = uri.get('address')
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Tachacoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Tachacoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Tachacoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
self.feecontrol_fields = QWidget()
vbox_feecontrol = QVBoxLayout(self.feecontrol_fields)
vbox_feecontrol.setContentsMargins(0, 0, 0, 0)
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addWidget(self.feecontrol_fields, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.is_onchain:
return
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
outputs = self.read_outputs()
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + '\t' + "%s"%x.get('address') + '\t'
for coin in self.pay_from:
item = QTreeWidgetItem([format(coin), self.format_amount(coin['value'])])
item.setFont(0, QFont(MONOSPACE_FONT))
self.from_list.addTopLevelItem(item)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
        '''Password request wrapper. The password is passed to the function
        as the 'password' named argument. "None" indicates either an
        unencrypted wallet, or that the user cancelled the password request.
        An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
@protected
def protect(self, func, args, password):
return func(*args, password)
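    # Minimal usage sketch for @protected (the method name below is
    # hypothetical, not part of this file): the wrapper prompts for the
    # wallet password and injects it as the 'password' keyword argument,
    # so callers never pass it themselves.
    #
    #   @protected
    #   def do_something_sensitive(self, arg, password):
    #       self.wallet.do_thing(arg, password)
    #
    #   self.do_something_sensitive(arg)   # password supplied by the wrapper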
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
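    # Sketch of the fee-estimator contract inferred from its use in this
    # file: make_unsigned_transaction() accepts None (use config defaults),
    # an int (an absolute fee in satoshis), or a callable mapping tx size to
    # a fee. A frozen feerate of 10 sat/byte therefore becomes:
    #
    #   fee_estimator = partial(
    #       simple_config.SimpleConfig.estimate_fee_for_feerate, 10 * 1000)
    #   # fee_estimator(size_in_bytes) -> fee in satoshis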
def read_outputs(self):
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
self.show_error(_('Tachacoin Address is None'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Tachacoin Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice):
amount_sat = self.amount_e.get_amount()
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
try:
self.wallet.lnworker.pay(invoice, amount_sat, attempts)
except Exception as e:
self.show_error(str(e))
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_request_status(self, key, status):
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
def on_invoice_status(self, key, status):
if key not in self.wallet.invoices:
return
self.invoice_list.update_item(key, status)
if status == PR_PAID:
self.show_message(_('Payment succeeded'))
self.need_update.set()
elif status == PR_FAILED:
self.show_error(_('Payment failed'))
else:
pass
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self.is_onchain:
invoice = self.payto_e.lightning_invoice
if not invoice:
return
if not self.wallet.lnworker:
self.show_error(_('Lightning is disabled'))
return
return self.wallet.lnworker.parse_bech32_invoice(invoice)
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(outputs, message, self.payment_request, self.payto_URI)
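    # Hedged sketch of the invoice dict shape, inferred from how
    # do_pay_invoice() and broadcast_transaction() consume it below (this is
    # not a documented schema):
    #
    #   invoice = {
    #       'type': PR_TYPE_ONCHAIN,       # or PR_TYPE_LN
    #       'id': '...',                   # used by wallet.set_paid()
    #       'message': 'coffee',           # becomes the tx label
    #       'outputs': [(TYPE_ADDRESS, addr, amount_sat)],  # onchain only
    #       'invoice': 'lnbc...',          # bech32 string, LN only
    #   }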
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_preview(self):
self.do_pay(preview=True)
def do_pay(self, preview=False):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.invoice_list.update()
self.do_pay_invoice(invoice, preview)
def do_pay_invoice(self, invoice, preview=False):
if invoice['type'] == PR_TYPE_LN:
self.pay_lightning_invoice(invoice['invoice'])
return
elif invoice['type'] == PR_TYPE_ONCHAIN:
message = invoice['message']
outputs = invoice['outputs']
else:
raise Exception('unknown invoice type')
if run_hook('abort_send', self):
return
outputs = [TxOutput(*x) for x in outputs]
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x: x.value, outputs))
fee = tx.get_fee()
use_rbf = bool(self.config.get('use_rbf', True))
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, invoice=invoice)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
self.do_clear()
if not tx.is_complete():
self.show_transaction(tx, invoice=invoice)
else:
self.broadcast_transaction(tx, invoice=invoice)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
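    # Usage sketch: sign_tx_with_password() never blocks the GUI thread; the
    # outcome arrives via the callback, as in do_pay_invoice() above.
    #
    #   def done(success: bool):
    #       if success:
    #           self.broadcast_transaction(tx)
    #   self.sign_tx_with_password(tx, done, password)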
def broadcast_transaction(self, tx, *, invoice=None, tx_desc=None):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if tx_desc:
self.wallet.set_label(txid, tx_desc)
if invoice:
self.wallet.set_paid(invoice['id'], txid)
self.wallet.set_label(txid, invoice['message'])
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
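    # Threading contract used above (implied by WaitingDialog's use
    # throughout this file): broadcast_thread() runs off the GUI thread and
    # returns a (success, message) tuple; broadcast_done() receives that
    # tuple back on the GUI thread, where touching widgets is safe.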
@protected
def open_channel(self, *args, **kwargs):
def task():
return self.wallet.lnworker.open_channel(*args, **kwargs)
def on_success(chan):
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
                _('Remote peer ID') + ': ' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
self.show_message(message)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(e))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and invoice['status'] == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
from electrum.lnaddr import lndecode, LnDecodeException
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.amount is not None:
self.amount_e.setAmount(lnaddr.amount * COIN)
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
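    # Minimal decode sketch, assuming the electrum.lnaddr semantics used
    # above: lndecode() returns an object whose .tags is a list of
    # (key, value) pairs, tag 'd' carries the description, and .amount is in
    # whole coins (hence the COIN multiplier).
    #
    #   lnaddr = lndecode('lnbc...', expected_hrp=constants.net.SEGWIT_HRP)
    #   description = next((v for k, v in lnaddr.tags if k == 'd'), '')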
def set_onchain(self, b):
self.is_onchain = b
self.preview_button.setEnabled(b)
self.max_button.setEnabled(b)
self.show_send_tab_onchain_fees(b)
def show_send_tab_onchain_fees(self, b: bool):
self.feecontrol_fields.setEnabled(b)
#self.fee_e_label.setVisible(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.is_onchain = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.set_onchain(len(coins) > 0)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
invoice = self.wallet.get_invoice(key)
if invoice is None:
            self.show_error(_('Cannot find payment request in wallet.'))
return
bip70 = invoice.get('bip70')
if bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(bip70))
pr.verify(self.contacts)
self.show_bip70_details(pr)
def show_bip70_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("BIP70 Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.wallet.delete_invoices(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def pay_bip70_invoice(self, key):
pr = self.wallet.get_invoice(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.storage.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
if self.wallet.has_lightning():
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
sb.addPermanentWidget(self.lightning_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def disable_lightning(self):
warning = _('This will delete your lightning private keys')
r = self.question(_('Disable Lightning payments?') + '\n\n' + warning)
if not r:
return
self.wallet.remove_lightning()
self.show_warning(_('Lightning keys have been removed. This wallet will be closed'))
self.close()
def enable_lightning(self):
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
        warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must back up your wallet file every time you create a new channel.")
r = self.question(_('Enable Lightning payments?') + '\n\n' + _('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2)
if not r:
return
self.wallet.init_lightning()
self.show_warning(_('Lightning keys have been initialized. This wallet will be closed'))
self.close()
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
if self.wallet.has_lightning():
lightning_b = QPushButton(_('Disable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.disable_lightning)
lightning_label = QLabel(_('Enabled'))
lightning_b.setDisabled(bool(self.wallet.lnworker.channels))
else:
lightning_b = QPushButton(_('Enable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.enable_lightning)
lightning_label = QLabel(_('Disabled'))
grid.addWidget(QLabel(_('Lightning')), 5, 0)
grid.addWidget(lightning_label, 5, 1)
grid.addWidget(lightning_b, 5, 2)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
# only show the combobox if multiple master keys are defined
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btns = run_hook('wallet_info_buttons', self, dialog) or Buttons(CloseButton(dialog))
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Tachacoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Tachacoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
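    # The scheme above is Electrum-style ECIES: encrypt_message() returns
    # base64 text, which do_decrypt() later feeds into
    # wallet.decrypt_message() together with the matching private key.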
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt) -> Optional[Transaction]:
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
        # if the user scanned a tachacoin URI
if str(data).startswith("tachacoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
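    # Note on the base-43 decode above: QR alphanumeric mode packs a 45-char
    # alphabet more densely than byte mode, so raw tx bytes are carried in
    # base 43 rather than hex. The assumed round trip (base_encode is the
    # matching helper in electrum.bitcoin):
    #
    #   encoded = bitcoin.base_encode(raw_tx_bytes, base=43)     # into the QR
    #   raw_hex = bh2u(bitcoin.base_decode(encoded, length=None, base=43))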
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
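    # Illustrative output of the two export formats above (fake values; note
    # the CSV writer left-pads addresses with spaces to width 34 via "%34s"):
    #
    #   CSV:   address,private_key
    #          Qexampleaddress...,p2pkh:Kexamplewif...
    #
    #   JSON:  {"Qexampleaddress...": "p2pkh:Kexamplewif..."}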
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Warning: Tachacoin Electrum has issue with sweeping P2PK utxos. \n"
"You'd better use \"restore wallet\", import your private keys \n"
"and send total balance to another address instead.")))
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(repr(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.storage.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: Transaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
        def on_fee_edit(x):
            fee_for_child = fee_e.get_amount() or 0  # get_amount() is None while the field is empty
            out_amt = max_fee - fee_for_child
            out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
            output_amount.setText(out_amt_str)
            comb_fee = parent_fee + fee_for_child
            comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
            combined_fee.setText(comb_fee_str)
            comb_feerate = comb_fee / total_size * 1000
            comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
            combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
            self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
        fee = fee_e.get_amount()
        if fee is None:
            return  # the fee field was left empty
        if fee > max_fee:
            self.show_error(_('Max fee exceeded'))
            return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
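    # Worked example of get_child_fee_from_total_feerate() above
    # (illustrative numbers): parent 300 bytes / 300 sat fee, child 150
    # bytes, target 10 sat/byte (fee_per_kb = 10000, total_size = 450):
    #
    #   fee = 10000 * 450 / 1000 - 300   # 4200 sat charged to the child
    #   fee = min(max_fee, fee)          # never more than the spent output
    #   fee = max(450, fee)              # floor of 1 sat/byte combined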
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
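    # Note on the 'Final' checkbox above: set_rbf(False) gives the
    # replacement a final nSequence, opting it out of BIP 125, so the new
    # transaction cannot itself be fee-bumped again.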
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def set_token(self, token: 'Token'):
self.wallet.add_token(token)
self.token_balance_list.update()
self.token_hist_list.update()
self.token_hist_model.refresh('set_token')
def delete_token(self, key: str):
token_name = self.wallet.db.get_token(key).name
if not self.question(_("Remove {} from your token list ?")
.format(token_name)):
return
self.wallet.delete_token(key)
self.token_balance_list.update()
self.token_hist_model.refresh('delete_token')
def create_tokens_tab(self):
from .token_list import TokenBalanceList, TokenHistoryModel, TokenHistoryList
self.token_balance_list = tbl = TokenBalanceList(self)
self.token_hist_model = TokenHistoryModel(self)
self.token_hist_list = thl = TokenHistoryList(self, self.token_hist_model)
self.token_hist_model.set_view(self.token_hist_list)
splitter = QSplitter(self)
splitter.addWidget(tbl)
splitter.addWidget(thl)
splitter.setOrientation(Qt.Vertical)
return splitter
def token_add_dialog(self):
d = TokenAddDialog(self)
d.show()
def token_info_dialog(self, token: 'Token'):
d = TokenInfoDialog(self, token)
d.show()
def token_send_dialog(self, token: 'Token'):
d = TokenSendDialog(self, token)
d.show()
def do_token_pay(self, token: 'Token', pay_to, amount, gas_limit, gas_price, dialog, preview=False):
try:
datahex = 'a9059cbb{}{:064x}'.format(pay_to.zfill(64), amount)
script = contract_script(gas_limit, gas_price, datahex, token.contract_addr, opcodes.OP_CALL)
outputs = [TxOutput(TYPE_SCRIPT, script, 0), ]
tx_desc = _('Pay out {} {}').format(amount / (10 ** token.decimals), token.symbol)
self._smart_contract_broadcast(outputs, tx_desc, gas_limit * gas_price,
token.bind_addr, dialog, None, preview)
except (BaseException,) as e:
traceback.print_exc(file=sys.stderr)
dialog.show_message(str(e))
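    # The datahex above is an ERC20-style transfer call: 'a9059cbb' is the
    # 4-byte selector of transfer(address,uint256), followed by the
    # recipient hash160 left-padded to 32 bytes and the amount as a 32-byte
    # big-endian integer. Illustrative encoding for 1000 base units:
    #
    #   'a9059cbb' + pay_to.zfill(64) + '{:064x}'.format(1000)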
def _smart_contract_broadcast(self, outputs, desc, gas_fee, sender, dialog, broadcast_done=None, preview=False):
coins = self.get_coins()
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, None, change_addr=sender,
gas_fee=gas_fee,
sender=sender)
except NotEnoughFunds:
dialog.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
dialog.show_message(str(e))
return
if preview:
self.show_transaction(tx, desc)
return
fee = tx.get_fee()
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
dialog.show_message(
_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
# confirmation dialog
msg = [
_(desc),
_("Mining fee") + ": " + self.format_amount_and_units(fee - gas_fee),
_("Gas fee") + ": " + self.format_amount_and_units(gas_fee),
]
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee - gas_fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc=desc)
if broadcast_done:
broadcast_done(tx)
self.sign_tx_with_password(tx, sign_done, password)
def create_smart_contract_tab(self):
from .smart_contract_list import SmartContractList
self.smart_contract_list = l = SmartContractList(self)
return self.create_list_tab(l)
def set_smart_contract(self, name: str, address: str, interface: list) -> bool:
if not is_hash160(address):
self.show_error(_('Invalid Address'))
self.smart_contract_list.update()
return False
self.wallet.db.smart_contracts[address] = (name, interface)
self.smart_contract_list.update()
return True
def delete_samart_contact(self, address: str) -> bool:
if not self.question(_("Remove {} from your list of smart contracts?".format(
self.wallet.db.smart_contracts[address][0]))):
return False
self.wallet.db.smart_contracts.pop(address)
self.smart_contract_list.update()
return True
def call_smart_contract(self, address, abi, args, sender, dialog):
data = eth_abi_encode(abi, args)
try:
result = self.network.run_from_another_thread(self.network.call_contract(address, data, sender))
except BaseException as e:
self.logger.exception('')
dialog.show_message(str(e))
return
types = list([x['type'] for x in abi.get('outputs', [])])
try:
if isinstance(result, dict):
output = eth_abi.decode_abi(types, binascii.a2b_hex(result['executionResult']['output']))
else:
output = eth_abi.decode_abi(types, binascii.a2b_hex(result))
def decode_x(x):
if isinstance(x, bytes):
try:
return x.decode()
except UnicodeDecodeError:
return str(x)
return str(x)
output = ','.join([decode_x(x) for x in output])
dialog.show_message(output)
except (BaseException,) as e:
self.logger.exception('')
dialog.show_message(f'{e} {result}')
def sendto_smart_contract(self, address, abi, args, gas_limit, gas_price, amount, sender, dialog, preview):
try:
abi_encoded = eth_abi_encode(abi, args)
script = contract_script(gas_limit, gas_price, abi_encoded, address, opcodes.OP_CALL)
outputs = [TxOutput(TYPE_SCRIPT, script, amount), ]
tx_desc = 'contract sendto {}'.format(self.wallet.db.smart_contracts[address][0])
self._smart_contract_broadcast(outputs, tx_desc, gas_limit * gas_price, sender, dialog, None, preview)
except (BaseException,) as e:
self.logger.exception('')
dialog.show_message(str(e))
def create_smart_contract(self, name, bytecode, abi, constructor, args, gas_limit, gas_price, sender, dialog, preview):
def broadcast_done(tx):
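            # The new contract address is derived from the creating
            # transaction: hash160 of the txid in internal (little-endian)
            # byte order concatenated with the 4-byte little-endian output
            # index, as used for OP_CREATE outputs.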
if is_opcreate_script(bfh(tx.outputs()[0].address)):
reversed_txid = binascii.a2b_hex(tx.txid())[::-1]
output_index = b'\x00\x00\x00\x00'
contract_addr = bh2u(hash_160(reversed_txid + output_index))
self.set_smart_contract(name, contract_addr, abi)
try:
abi_encoded = ''
if constructor:
abi_encoded = eth_abi_encode(constructor, args)
script = contract_script(gas_limit, gas_price, bytecode + abi_encoded, None, opcodes.OP_CREATE)
outputs = [TxOutput(TYPE_SCRIPT, script, 0), ]
self._smart_contract_broadcast(outputs, 'create contract {}'.format(name), gas_limit * gas_price,
sender, dialog, broadcast_done, preview)
except (BaseException,) as e:
self.logger.exception('')
dialog.show_message(str(e))
def contract_create_dialog(self):
d = ContractCreateDialog(self)
d.show()
def contract_add_dialog(self):
d = ContractEditDialog(self)
d.show()
def contract_edit_dialog(self, address):
name, interface = self.wallet.db.smart_contracts[address]
contract = {
'name': name,
'interface': interface,
'address': address
}
d = ContractEditDialog(self, contract)
d.show()
def contract_func_dialog(self, address):
name, interface = self.wallet.db.smart_contracts[address]
contract = {
'name': name,
'interface': interface,
'address': address
}
d = ContractFuncDialog(self, contract)
d.show()
|
the-stack_0_23992 | import logging
from typing import Dict, List
from rlbot.agents.hivemind.python_hivemind import PythonHivemind
from rlbot.utils.structures.bot_input_struct import PlayerInput
from rlbot.utils.structures.game_data_struct import GameTickPacket
from maneuvers.kickoffs.kickoff import Kickoff
from maneuvers.general_defense import GeneralDefense
from maneuvers.refuel import Refuel
from rlutilities.linear_algebra import vec3
from strategy import teamplay_strategy
from strategy.hivemind_strategy import HivemindStrategy
from tools.drawing import DrawingTool
from tools.drone import Drone
from tools.game_info import GameInfo
RELEASE = True
class Beehive(PythonHivemind):
def __init__(self, *args):
super().__init__(*args)
self.info: GameInfo = None
self.team: int = None
self.draw: DrawingTool = None
self.drones: List[Drone] = []
self.strategy: HivemindStrategy = None
self.last_latest_touch_time = 0.0
def initialize_hive(self, packet: GameTickPacket) -> None:
index = next(iter(self.drone_indices))
self.team = packet.game_cars[index].team
self.info = GameInfo(self.team)
self.info.set_mode("soccar")
self.strategy = HivemindStrategy(self.info, self.logger)
self.draw = DrawingTool(self.renderer, self.team)
self.drones = [Drone(self.info.cars[i], i) for i in self.drone_indices]
self.logger.handlers[0].setLevel(logging.NOTSET) # override handler level
self.logger.setLevel(logging.INFO if RELEASE else logging.DEBUG)
self.logger.info("Beehive initialized")
def get_outputs(self, packet: GameTickPacket) -> Dict[int, PlayerInput]:
self.info.read_packet(packet, self.get_field_info())
# if a kickoff is happening and none of the drones have a Kickoff maneuver active, reset all drone maneuvers
if (
packet.game_info.is_kickoff_pause
and self.info.ball.position[0] == 0 and self.info.ball.position[1] == 0
and not any(isinstance(drone.maneuver, Kickoff) for drone in self.drones)
):
if len(self.drones) == 1:
self.drones[0].maneuver = None
else:
self.strategy.set_kickoff_maneuvers(self.drones)
# reset drone maneuvers when an opponent hits the ball
touch = packet.game_ball.latest_touch
if touch.time_seconds > self.last_latest_touch_time and touch.team != self.team:
self.last_latest_touch_time = touch.time_seconds
for drone in self.drones:
if drone.maneuver and drone.maneuver.interruptible(): # don't reset a drone while dodging/recovering
drone.maneuver = None
# reset drone maneuver when it gets demoed
for drone in self.drones:
if drone.maneuver and drone.car.demolished:
drone.maneuver = None
# if at least one drone doesn't have an active maneuver, execute strategy code
if None in [drone.maneuver for drone in self.drones]:
self.logger.debug("Setting maneuvers")
if len(self.drones) == 1:
self.drones[0].maneuver = teamplay_strategy.choose_maneuver(self.info, self.drones[0].car)
else:
self.strategy.set_maneuvers(self.drones)
for drone in self.drones:
if drone.maneuver is None:
continue
# execute maneuvers
drone.maneuver.step(self.info.time_delta)
drone.controls = drone.maneuver.controls
drone.maneuver.render(self.draw)
# draw names of maneuvers above our drones
self.draw.color(self.draw.yellow)
self.draw.string(drone.car.position + vec3(0, 0, 50), type(drone.maneuver).__name__)
# expire finished maneuvers
if drone.maneuver.finished:
drone.maneuver = None
if len(self.drones) > 1:
self.strategy.avoid_demos_and_team_bumps(self.drones)
self.strategy.render(self.draw)
self.draw.execute()
return {drone.index: drone.get_player_input() for drone in self.drones}
|
the-stack_0_23994 | # Logging for production-ready software, so that misbehavior can be traced after the fact
import time
import logging
from pathlib import Path
# DEBUG: Detailed debug information
# INFO: Things working as intended
# WARNING: Something unexpected happened
# ERROR: The software cannot perform some function
# CRITICAL: Program crashes for example
# Setup the logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)  # minimum severity level that gets logged
# Formatter, FileHandler
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(funcName)s:%(message)s')
filepath = Path(__file__).parent.joinpath('log_standard.log')  # directory of this file (parent)
file_handler = logging.FileHandler(filepath)
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
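# Optionally mirror records to the console as well. This StreamHandler is an
# illustrative addition (an assumption, not part of the original setup):
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)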
def divide_integers(a, b):
try:
logger.debug("a={}, b={}".format(a, b))
result = a / b
return result
except Exception as e:
logger.exception("Exception was raised: {}".format(e))
def main():
for _ in range(3):
print(divide_integers(10, 0))
if __name__ == "__main__":
main() |
the-stack_0_23996 | """
.. codeauthor:: David Zwicker <[email protected]>
"""
import itertools
import pytest
from pde import UnitGrid
from pde.grids.boundaries.axis import BoundaryPair, get_boundary_axis
from pde.grids.boundaries.local import BCBase
def test_boundary_pair():
"""test setting boundary conditions for whole axis"""
g = UnitGrid([2, 3])
b = ["value", {"type": "derivative", "value": 1}]
for bl, bh in itertools.product(b, b):
bc = BoundaryPair.from_data(g, 0, [bl, bh])
blo = BCBase.from_data(g, 0, upper=False, data=bl)
bho = BCBase.from_data(g, 0, upper=True, data=bh)
assert bc.low == blo
assert bc.high == bho
assert bc == BoundaryPair(blo, bho)
if bl == bh:
assert bc == BoundaryPair.from_data(g, 0, bl)
assert list(bc) == [blo, bho]
bc.check_value_rank(0)
with pytest.raises(RuntimeError):
bc.check_value_rank(1)
data = {"low": {"value": 1}, "high": {"derivative": 2}}
bc1 = BoundaryPair.from_data(g, 0, data)
bc2 = BoundaryPair.from_data(g, 0, data)
assert bc1 == bc2 and bc1 is not bc2
bc2 = BoundaryPair.from_data(g, 1, data)
assert bc1 != bc2 and bc1 is not bc2
# miscellaneous methods
bc1.set_value(0)
b_lo, b_hi = bc1
assert b_lo == BCBase.from_data(g, 0, False, {"value": 0})
assert b_hi == BCBase.from_data(g, 0, True, {"derivative": 0})
assert b_lo is bc1[0]
assert b_lo is bc1[False]
assert b_hi is bc1[1]
assert b_hi is bc1[True]
def test_get_axis_boundaries():
"""test setting boundary conditions including periodic ones"""
g = UnitGrid([2])
for data in ["value", "derivative", "periodic"]:
b = get_boundary_axis(g, 0, data)
assert str(b) == '"' + data + '"'
if data == "periodic":
assert b.periodic
assert len(list(b)) == 0
else:
assert not b.periodic
assert len(list(b)) == 2
|
the-stack_0_23997 | import argparse
import atexit
import copy
import datetime
import numpy as np
import os
import torch
import tensorboardX
from functools import partial
from prob_mbrl import utils, models, algorithms, envs
def update_value_function(V,
opt,
H,
i,
states,
actions,
rewards,
discount,
V_target=None,
reg_weight=1e-3,
resample=False,
polyak_averaging=0.005):
V_tgt = V if V_target is None else V_target
V.train()
V.zero_grad()
discounted_rewards = torch.stack(
[r * discount(j) for j, r in enumerate(rewards[:H])])
returns = discounted_rewards.sum(0).detach()
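    # H-step bootstrapped target: sum_{j < H} discount(j) * r_j plus
    # discount(H) * V(s_H), assembled below per output-density case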
# when we evaluate the value function with resample=True, we use
# the same seed. this ensures that we don't overwrite the noise masks
# used when resample = False, but that we get the same masks for V0 and VH
seed = torch.randint(2**32, [1])
if V.output_density is None:
V0 = V(states[0].detach(), resample=resample, seed=seed)
VH = V_tgt(states[H].detach(), resample=resample, seed=seed)
targets = returns + discount(H) * VH.detach()
loss = torch.nn.functional.mse_loss(V0, targets)
else:
# the output of the network are the parameters of a probability density
pV0 = V(states[0].detach(),
resample=resample,
seed=seed,
return_samples=False)
VH = V_tgt(states[H].detach(),
resample=resample,
seed=seed,
return_samples=True,
output_noise=False)
targets = returns + discount(H) * VH.detach()
        # negative log-likelihood of the targets under the predicted density
        loss = -V.output_density.log_prob(targets, *pV0).mean()
if hasattr(V, 'regularization_loss'):
loss += reg_weight * V.regularization_loss()
loss.backward()
opt.step()
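    # soft (Polyak) update of the target network:
    # theta_target <- tau * theta + (1 - tau) * theta_target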
if V_target is not None and polyak_averaging > 0:
tau = polyak_averaging
for param, target_param in zip(V.parameters(), V_target.parameters()):
target_param.data.copy_(tau * param.data +
(1 - tau) * target_param.data)
V.eval()
# print(torch.cat([torch.stack(rewards).sum(0), returns, targets, V0], -1))
def update_Qvalue_function(Q,
policy,
opt,
H,
i,
states,
actions,
rewards,
discount,
Q_target=None,
polyak_averaging=0.005):
Q_tgt = Q if Q_target is None else Q_target
Q.zero_grad()
N = rewards[0].shape[0]
discounted_rewards = torch.stack(
[r * discount(j) for j, r in enumerate(rewards[:H])])
returns = discounted_rewards.sum(0).detach()
# we evaluate the value function with resample=True, but with the same seed
# this ensures that we don't overwrite the noise masks used when
# resample = False, but that we get the same masks for V0 and VH
seed = torch.randint(2**32, [1])
inps0 = torch.cat([states[0], actions[0]], -1)
inpsH = torch.cat([states[H], policy(states[H], resample=True)], -1)
if Q.output_density is None:
Q0 = Q(inps0.detach(), resample=True, seed=seed)
QH = Q_tgt(inpsH.detach(), resample=True, seed=seed)
targets = returns + discount(H) * QH.detach()
loss = torch.nn.functional.mse_loss(Q0, targets)
else:
# the output of the network are the parameters of a probability density
pQ0 = Q(inps0.detach(), resample=True, seed=seed, return_samples=False)
QH = Q_tgt(inpsH.detach(),
resample=True,
seed=seed,
return_samples=True,
output_noise=False)
targets = returns + discount(H) * QH.detach()
        # negative log-likelihood of the targets under the predicted density
        loss = -Q.output_density.log_prob(targets, *pQ0).mean()
    if hasattr(Q, 'regularization_loss'):
        loss += Q.regularization_loss() / N
loss.backward()
opt.step()
if Q_target is not None and polyak_averaging > 0:
tau = polyak_averaging
for param, target_param in zip(Q.parameters(), Q_target.parameters()):
target_param.data.copy_(tau * param.data +
(1 - tau) * target_param.data)
# print(torch.cat([rewards.sum(0), returns, targets, pQ0[0]], -1))
if __name__ == '__main__':
parser = argparse.ArgumentParser("Deep-PILCO with moment matching")
parser.add_argument('-e', '--env', type=str, default="Cartpole")
parser.add_argument('-o',
'--output_folder',
type=str,
default="~/.prob_mbrl/")
parser.add_argument('-s', '--seed', type=int, default=1)
parser.add_argument('--num_threads', type=int, default=1)
parser.add_argument('--n_initial_epi', type=int, default=0)
parser.add_argument('--load_from', type=str, default=None)
parser.add_argument('--pred_H', type=int, default=15)
parser.add_argument('--control_H', type=int, default=40)
parser.add_argument('--discount_factor', type=str, default=None)
parser.add_argument('--prioritized_replay', action='store_true')
parser.add_argument('--timesteps_to_sample',
type=utils.load_csv,
default=0)
parser.add_argument('--mm_groups', type=int, default=None)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--dyn_lr', type=float, default=1e-4)
parser.add_argument('--dyn_opt_iters', type=int, default=2000)
parser.add_argument('--dyn_batch_size', type=int, default=100)
parser.add_argument('--dyn_drop_rate', type=float, default=0.1)
parser.add_argument('--dyn_components', type=int, default=1)
parser.add_argument('--dyn_shape', type=utils.load_csv, default=[200, 200])
parser.add_argument('--pol_lr', type=float, default=1e-4)
parser.add_argument('--pol_clip', type=float, default=1.0)
parser.add_argument('--pol_drop_rate', type=float, default=0.1)
parser.add_argument('--pol_opt_iters', type=int, default=1000)
parser.add_argument('--pol_batch_size', type=int, default=100)
parser.add_argument('--ps_iters', type=int, default=100)
parser.add_argument('--pol_shape', type=utils.load_csv, default=[200, 200])
parser.add_argument('--val_lr', type=float, default=1e-4)
parser.add_argument('--val_drop_rate', type=float, default=0.1)
parser.add_argument('--val_batch_size', type=int, default=100)
parser.add_argument('--val_shape', type=utils.load_csv, default=[200, 200])
parser.add_argument('--plot_level', type=int, default=0)
parser.add_argument('--render', action='store_true')
parser.add_argument('--use_cuda', action='store_true')
parser.add_argument('--learn_reward', action='store_true')
parser.add_argument('--keep_best', action='store_true')
parser.add_argument('--stop_when_done', action='store_true')
parser.add_argument('--expl_noise', type=float, default=0.0)
# parameters
args = parser.parse_args()
loaded_from = args.load_from
if loaded_from is not None:
args = torch.load(os.path.join(loaded_from, 'args.pth.tar'))
# initialize environment
torch.set_num_threads(args.num_threads)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
torch.set_flush_denormal(True)
if args.env in envs.__all__:
env = envs.__dict__[args.env]()
else:
import gym
env = gym.make(args.env)
env_name = env.spec.id if env.spec is not None else env.__class__.__name__
output_folder = os.path.expanduser(args.output_folder)
results_folder = os.path.join(
output_folder, "mc_pilco_no_mm_wval", env_name,
datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S.%f"))
try:
os.makedirs(results_folder)
except OSError:
pass
results_filename = os.path.join(results_folder, "experience.pth.tar")
torch.save(args, os.path.join(results_folder, 'args.pth.tar'))
D = env.observation_space.shape[0]
U = env.action_space.shape[0]
maxU = env.action_space.high
minU = env.action_space.low
# initialize reward/cost function
if (args.learn_reward or not hasattr(env, 'reward_func')
or env.reward_func is None):
reward_func = None
args.learn_reward = True
else:
reward_func = env.reward_func
    # initialize to max episode steps if available
if hasattr(env, 'spec'):
if hasattr(env.spec, 'max_episode_steps'):
args.control_H = env.spec.max_episode_steps
args.stop_when_done = True
initial_experience = args.control_H * args.n_initial_epi
# initialize discount factor
if args.discount_factor is not None:
if args.discount_factor == 'auto':
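            # heuristic choice: gamma such that gamma ** (H / 2) == 1 / H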
args.discount_factor = (1.0 / args.control_H)**(2.0 /
args.control_H)
else:
args.discount_factor = float(args.discount_factor)
# initialize dynamics model
dynE = 2 * (D + 1) if args.learn_reward else 2 * D
if args.dyn_components > 1:
output_density = models.GaussianMixtureDensity(dynE / 2,
args.dyn_components)
dynE = (dynE + 1) * args.dyn_components + 1
else:
output_density = models.DiagGaussianDensity(dynE / 2)
dyn_model = models.mlp(
D + U,
dynE,
args.dyn_shape,
dropout_layers=[
models.modules.CDropout(args.dyn_drop_rate * np.ones(hid))
if args.dyn_drop_rate > 0 else None for hid in args.dyn_shape
],
nonlin=torch.nn.ReLU)
dyn = models.DynamicsModel(dyn_model,
reward_func=reward_func,
output_density=output_density).float()
    # initialize policy
pol_model = models.mlp(D,
2 * U,
args.pol_shape,
dropout_layers=[
models.modules.BDropout(args.pol_drop_rate)
if args.pol_drop_rate > 0 else None
for hid in args.pol_shape
],
biases_initializer=None,
nonlin=torch.nn.ReLU,
output_nonlin=partial(models.DiagGaussianDensity,
U))
pol = models.Policy(pol_model, maxU, minU).float()
# initialize value function approximator
critic_model = models.mlp(D,
1,
args.val_shape,
dropout_layers=[
models.modules.CDropout(args.val_drop_rate)
if args.val_drop_rate > 0 else None
for hid in args.val_shape
],
nonlin=torch.nn.Tanh)
V = models.Regressor(critic_model).float()
V_target = copy.deepcopy(V)
V_target.load_state_dict(V.state_dict())
print('Dynamics model\n', dyn)
print('Policy\n', pol)
print('Critic\n', V)
    # initialize experience dataset
exp = utils.ExperienceDataset()
if loaded_from is not None:
utils.load_checkpoint(loaded_from, dyn, pol, exp, V)
# initialize dynamics optimizer
opt1 = torch.optim.Adam(dyn.parameters(), args.dyn_lr)
# initialize policy optimizer
opt2 = torch.optim.Adam(pol.parameters(), args.pol_lr)
# initialize critic optimizer
opt3 = torch.optim.Adam(V.parameters(), args.val_lr)
if args.use_cuda and torch.cuda.is_available():
dyn = dyn.cuda()
pol = pol.cuda()
writer = tensorboardX.SummaryWriter(
logdir=os.path.join(results_folder, "logs"))
# callbacks
def on_close():
writer.close()
atexit.register(on_close)
# initial experience data collection
env.seed(args.seed)
rnd = lambda x, t: env.action_space.sample() # noqa: E731
while exp.n_samples() < initial_experience:
ret = utils.apply_controller(
env,
rnd,
min(args.control_H, initial_experience - exp.n_samples() + 1),
stop_when_done=args.stop_when_done)
exp.append_episode(*ret, policy_params=[])
if initial_experience > 0:
exp.policy_parameters[-1] = copy.deepcopy(pol.state_dict())
exp.save(results_filename)
# policy learning loop
    expl_pol = lambda x, t: (  # noqa: E731
pol(x) + args.expl_noise * rnd(x, t)).clip(minU, maxU)
render_fn = (lambda *args, **kwargs: env.render()) if args.render else None
update_V_fn = partial(update_value_function, V, opt3, args.pred_H)
for ps_it in range(args.ps_iters):
# apply policy
new_exp = exp.n_samples() + args.control_H
while exp.n_samples() < new_exp:
ret = utils.apply_controller(env,
expl_pol,
min(args.control_H,
new_exp - exp.n_samples() + 1),
stop_when_done=args.stop_when_done,
callback=render_fn)
exp.append_episode(*ret, policy_params=[])
exp.policy_parameters[-1] = copy.deepcopy(pol.state_dict())
exp.save(results_filename)
# train dynamics
X, Y = exp.get_dynmodel_dataset(deltas=True,
return_costs=args.learn_reward)
dyn.set_dataset(X.to(dyn.X.device, dyn.X.dtype),
Y.to(dyn.X.device, dyn.X.dtype))
utils.train_regressor(dyn,
args.dyn_opt_iters,
args.dyn_batch_size,
True,
opt1,
log_likelihood=dyn.output_density.log_prob,
summary_writer=writer,
summary_scope='model_learning/episode_%d' %
ps_it)
torch.save(dyn.state_dict(),
os.path.join(results_folder, 'latest_dynamics.pth.tar'))
# sample initial states for policy optimization
x0 = exp.sample_states(args.pol_batch_size,
timestep=0).to(dyn.X.device,
dyn.X.dtype).detach()
if args.plot_level > 0:
utils.plot_rollout(x0[:25], dyn, pol, args.pred_H * 2)
# train policy
def on_iteration(i, loss, states, actions, rewards, discount):
writer.add_scalar('mc_pilco/episode_%d/training loss' % ps_it,
loss, i)
print("Policy search iteration %d" % (ps_it + 1))
algorithms.mc_pilco(x0,
dyn,
pol,
args.pred_H,
opt2,
exp,
args.pol_opt_iters,
value_func=V,
discount=args.discount_factor,
pegasus=True,
mm_states=False,
mm_rewards=False,
mm_groups=args.mm_groups,
maximize=True,
clip_grad=args.pol_clip,
step_idx_to_sample=args.timesteps_to_sample,
init_state_noise=1e-1 * x0.std(0),
prioritized_replay=args.prioritized_replay,
on_iteration=on_iteration,
on_rollout=update_V_fn,
debug=args.debug)
torch.save(pol.state_dict(),
os.path.join(results_folder, 'latest_policy.pth.tar'))
torch.save(V.state_dict(),
os.path.join(results_folder, 'latest_critic.pth.tar'))
if args.plot_level > 0:
utils.plot_rollout(x0[:25], dyn, pol, args.pred_H * 2)
writer.add_scalar('robot/evaluation_loss',
torch.tensor(ret[2]).sum(), ps_it + 1)
|
the-stack_0_23998 | #!/usr/bin/env python
"""
Surface proxies of TRUNAJOD.
These surface proxies are measurements taken from text that consist of shallow
measures (proxies) approximating intrinsic properties of the text, such as
cohesion, coherence, and complexity. Examples of these measurements include,
but are not limited to: number of sentences, number of syllables, etc.
"""
import re
from copy import deepcopy
from functools import wraps
from math import log
from TRUNAJOD.syllabizer import Syllabizer
from TRUNAJOD.utils import is_word
from TRUNAJOD.verb_types import GERUND_VERBS
from TRUNAJOD.verb_types import INFINITIVE_VERBS
from TRUNAJOD.verb_types import PAST_TENSE_VERBS
PERIPHRASIS_GER = "VerbForm=Ger"
PERIPHRASIS_INF = "VerbForm=Inf"
PERIPHRASIS_PAR = "VerbForm=Part"
PERIPHRASIS_SUF = "|Perif"
NEGATION_WORDS = {
"no",
"ni",
"nunca",
"jamás",
"jamás",
"tampoco",
"nadie",
"nada",
"ningún",
"ninguno",
"ninguna",
}
def _fix_doc(func):
@wraps(func)
def function_wrapper(doc, infinitive_map):
fixed_doc = fix_parse_tree(doc, infinitive_map)
return func(fixed_doc, infinitive_map)
return function_wrapper
def add_periphrasis(doc, periphrasis_type, periphrasis_list):
"""Add periphrasis to SPACY tags.
    One of the drawbacks of spaCy is that it does not properly handle
    periphrases (in our case, in Spanish text). This function adds
    periphrases to the text in order to improve further analysis such as
    clause segmentation and clause counting. This is used by
    :func:`TRUNAJOD.surface_proxies.fix_parse_tree`.
    :param doc: Tokenized text
    :type doc: Spacy Doc
    :param periphrasis_type: Periphrasis type
    :type periphrasis_type: string
:param periphrasis_list: List of periphrasis
:type periphrasis_list: List of strings
:return: Corrected doc
:rtype: Spacy Doc
"""
regexp = re.compile(periphrasis_type)
for token in doc:
if token.pos_ in {"VERB", "AUX"} or regexp.search(token.tag_):
if regexp.search(token.tag_):
for periphrasis in periphrasis_list:
# For multi-word periphrases
periphrasis_words = periphrasis.split()
pos = token.i - len(periphrasis_words)
if pos >= 0:
fail = False
for word in periphrasis_words:
if word.lower() != doc[pos].lemma_:
fail = True
break
pos = pos + 1
if not fail:
pos = token.i - len(periphrasis_words) + 1
for k in range(len(periphrasis_words)):
doc[pos].tag_ = doc[pos].tag_ + PERIPHRASIS_SUF
pos = pos + 1
return doc
def average_clause_length(doc, infinitive_map):
"""Return average clause length (heuristic).
This measurement is computed as the ratio of # of words / # of clauses.
To count clauses we do it heuristically, and you can refer to
:func:`TRUNAJOD.surface_proxies.clause_count` for more details.
:param doc: Text to be processed
:type doc: Spacy Doc
    :param infinitive_map: Lexicon containing maps from conjugate to infinitive.
:type infinitive_map: dict
:return: Average clause length
:rtype: float
"""
return word_count(doc) / clause_count(doc, infinitive_map)
def average_sentence_length(doc):
"""Return average sentence length.
This measurement is computed as the ratio of: # of words / # of sentences.
:param doc: Text to be processed.
:type doc: Spacy Doc
:return: average sentence length
:rtype: float
"""
return word_count(doc) / sentence_count(doc)
def average_word_length(doc):
"""Return average word length.
Computed as the ratio of: # number of chars / # of words
:param doc: Text to be processed.
:type doc: Spacy Doc
:return: Average word length
:rtype: float
"""
return char_count(doc) / word_count(doc)
def char_count(doc):
"""Return number of chars in a text.
This count does not consider anything that its ``Token.pos_`` tag is
either ``PUNCT`` or ``SPACE``.
:param doc: Text to be processed.
:type doc: Spacy Doc
:return: Char count
:rtype: int
"""
return sum(
[
len(token.lower_)
for token in doc
if token.pos_ not in {"PUNCT", "SPACE"}
]
)
@_fix_doc
def clause_count(doc, infinitive_map):
"""Return clause count (heuristic).
    This function is decorated via ``_fix_doc``, which applies
    :func:`TRUNAJOD.surface_proxies.fix_parse_tree` to the document so that
    clauses can be counted heuristically on the corrected parse tree.
:param doc: Text to be processed.
:type doc: Spacy Doc
    :param infinitive_map: Lexicon containing maps from conjugate to infinitive.
:type infinitive_map: dict
:return: Clause count
:rtype: int
"""
n_clauses = 0
regexp = re.compile("VerbForm=Fin")
regexp_perif = re.compile("Perif")
for token in doc:
verb_or_aux = token.pos_ in {"VERB", "AUX"}
if verb_or_aux and not regexp_perif.search(token.tag_):
if regexp.search(token.tag_):
n_clauses += 1
return n_clauses
def first_second_person_count(doc):
"""Count first|second person tokens.
:param doc: Processed text
:type doc: Spacy Doc
:return: First and second person count
:rtype: int
"""
criteria = re.compile("Person=1|Person=2")
return sum([1 for token in doc if criteria.search(token.tag_)])
def first_second_person_density(doc):
"""Compute density of first|second person.
:param doc: Processed text
:type doc: Spacy Doc
:return: Density 1,2 person
:rtype: float
"""
return first_second_person_count(doc) / word_count(doc)
def fix_parse_tree(doc, infinitive_map):
"""Fix SPACY parse tree.
    We found that for Spanish texts, spaCy tags do not deal appropriately
    with periphrases and other linguistic cues. This function addresses this
    shortcoming by modifying the parse tree computed by spaCy, adding
    periphrases for gerund, infinitive and past tense verbs.
    :param doc: Processed text
    :type doc: Spacy Doc
    :param infinitive_map: Lexicon containing maps from conjugate to infinitive.
:type infinitive_map: dict
:return: Fixed Doc
:rtype: Spacy Doc
"""
fixed_doc = deepcopy(doc)
for token in fixed_doc:
if token.pos_ in {"VERB", "AUX"}:
conjugate = infinitve(token.text, infinitive_map)
if conjugate is not None:
token.lemma_ = conjugate
fixed_doc = add_periphrasis(fixed_doc, PERIPHRASIS_INF, INFINITIVE_VERBS)
fixed_doc = add_periphrasis(fixed_doc, PERIPHRASIS_GER, GERUND_VERBS)
fixed_doc = add_periphrasis(fixed_doc, PERIPHRASIS_PAR, PAST_TENSE_VERBS)
return fixed_doc
def frequency_index(doc, frequency_dict):
"""Return frequency index.
The frequency index is defined as the average frequency of the rarest
word over sentences. To compute this, we use a dictionary. In the case
of this Spanish implementation we could use RAE dictionary CREA.
    :param doc: Tokenized text.
    :type doc: Spacy Doc
    :param frequency_dict: Word frequency lexicon (e.g. RAE CREA counts).
    :type frequency_dict: dict
:return: Frequency index
:rtype: float
"""
n_sents = 0
aggregate_frec = 0
for sent in doc.sents:
        # +inf as sentinel: sentences with no in-lexicon word are skipped
        # instead of adding the sentinel value to the average
        minimum = float("inf")
        for token in sent:
            if is_word(token):
                frec = frequency_dict.get(token.lower_, 0)
                if 0 < frec < minimum:
                    minimum = frec
        if minimum != float("inf"):
aggregate_frec += log(minimum, 10)
n_sents += 1
return aggregate_frec / n_sents
def get_word_depth(index, doc):
"""Get word depth in the parse tree given a sentence and token index.
The ``ROOT`` of the sentence is considered level 1. This method traverses
the parse tree until reaching the ``ROOT``, and counts all levels
traversed.
:param index: Position of the token in the sentence
:type index: int
:param doc: Tokenized text
:type doc: Spacy Doc
:return: Depth of the of the token
:rtype: int
"""
token = doc[index]
token_parent = doc[index].head
depth = 1
while token != token_parent:
token = token.head
token_parent = token_parent.head
depth += 1
return depth
def infinitve(conjugate, infinitive_map):
"""Get infinitive form of a conjugated verb.
Given a mapping of conjugate to infinitive, this function computes
the infinitive form of a conjugate verb. We provide models available
for downloading, so you do not have to worry about the ``infinitive_map``.
Regretfully we only provide models for Spanish texts.
:param conjugate: Verb to be processed
:type conjugate: string
    :param infinitive_map: Lexicon containing maps from conjugate to infinitive.
:type infinitive_map: dict
:return: Infinitive form of the verb, None if not found
:rtype: string
"""
conjugate = conjugate.lower()
for word_list in infinitive_map:
if conjugate in word_list:
infinitive = infinitive_map[word_list]
return infinitive if infinitive else None
return None
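# Illustrative lookup (hypothetical lexicon entry): if infinitive_map maps the
# key ("corría", "corrías", "corrían") to "correr", then
# infinitve("Corrían", infinitive_map) returns "correr".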
def lexical_density(doc):
"""Compute lexical density.
The lexical density is defined as the Part of Speech ratio of the
following tags: ``VERB``, ``AUX``, ``ADJ``, ``NOUN``, ``PROPN`` and
``ADV`` over the total number of words.
:param doc: Tokenized text
:type doc: Spacy Doc
:return: Lexical density
:rtype: Float
"""
return pos_ratio(doc, "VERB|AUX|ADJ|NOUN|PROPN|ADV")
def connection_words_ratio(doc):
"""Get ratio of connecting words over total words of text.
This function computes the ratio of connective words over the total
number of words. This implementation is only supported in Spanish and
we consider the following lemmas: ``y``, ``o``, ``no``, ``si``.
:param doc: Tokenized text
:type doc: Spacy Doc
:return: Connection word ratio
:rtype: float
"""
return sum(
[
1
for token in doc
if token.lemma_.lower() in {"y", "o", "no", "si"}
and is_word(token)
]
) / word_count(doc)
def negation_density(doc):
"""Compute negation density.
This is defined as the ratio between number of occurrences of
``TRUNAJOD.surface_proxies.NEGATION_WORDS`` in the text over the
total word count.
:param doc: Tokenized text
:type doc: Spacy Doc
:return: Negation density
:rtype: float
"""
negation_count = 0
for token in doc:
if is_word(token) and token.lemma_.lower() in NEGATION_WORDS:
negation_count += 1
return negation_count / word_count(doc)
def node_similarity(node1, node2, is_central_node=False):
"""Compute node similarity recursively, based on common children POS.
This function is called inside
:func:`TRUNAJOD.surface_proxies.syntactic_similarity`
so is an auxiliary function. In the common use case, is unlikely you will
need to call this function directly, but we provide it for debugging
purposes.
:param node1: Node of the parse tree.
:type node1: Spacy Token
:param node2: Node of the parse tree
:type node2: Spacy Token
:param is_central_node: Whether is the central node, defaults to False
:type is_central_node: bool, optional
    :return: Total number of children in common between node1 and node2.
:rtype: int
"""
similarity = 0
common_childs_node1 = set()
common_childs_node2 = set()
if is_central_node:
if node1.pos_ == node2.pos_:
similarity += 1
else:
return 0
for child1 in node1.children:
for child2 in node2.children:
child_not_seen = (
child1 not in common_childs_node1
and child2 not in common_childs_node2
)
if child1.pos_ == child2.pos_ and child_not_seen:
similarity += 1
common_childs_node1.add(child1)
common_childs_node2.add(child2)
similarity += node_similarity(child1, child2, False)
return similarity
def noun_count(doc):
"""Count nouns in the text.
Count all tokens which Part of Speech tag is either ``NOUN`` or
``PROPN``.
:param doc: Text to be processed
:type doc: Spacy Doc
:return: Noun count
:rtype: int
"""
return sum([1 for token in doc if token.pos_ in {"NOUN", "PROPN"}])
def noun_phrase_density(doc):
"""Compute NP density.
To compute NP density we do it heuristically. We might improve it in the
future by using some NP-chunking strategy. For counting noun phrases, we
check that for a node in the parse tree, its head is a Noun. Then, we
check if either of the following conditions is met:
* The token is the article ``del`` or ``al``
* The token dependency is not ``cc``, ``case`` or ``cop``, and the token
is not a punctuation and the token is not the ``ROOT``
Then we compute the ratio between # of NP / Noun count.
:param doc: Tokenized text.
:type doc: Spacy Doc
:return: NP density
:rtype: float
"""
children = 0
for sent in doc.sents:
for token in sent:
if token.head.pos_ in {"NOUN", "PROPN"}:
spanish_al_del_article = token.text.upper() in {"AL", "DEL"}
condition = token.dep_ not in {"cc", "case", "cop"}
condition = condition and token.pos_ != "PUNCT"
condition = condition and token.head != token
if spanish_al_del_article or condition:
children += 1
return children / noun_count(doc)
def pos_dissimilarity(doc):
"""Measure Part of Speech disimilarity over sentences.
The dissimilarity of POS between two sentences is the difference
between POS distribution over the total population of POS tags.
It is computed as follows:
* For each sentence, PoS tag distribution is computed.
* For each tag in either of the two sentences, we compute the difference
in distributions (absolute value)
* This difference is divided by the total population of the sentences
This is done for each pair of sentences (``N - 1`` sentences) and the
results are averaged (again, over ``N - 1``)
:param doc: Processed text
:type doc: Spacy Doc
:return: Part of Speech dissimilarity
:rtype: float
"""
sent_pos_dist = []
for sent in doc.sents:
sent_pos_dist.append(pos_distribution(sent))
    dissimilarity = 0
for i in range(len(sent_pos_dist) - 1):
common_adj_tags = set(sent_pos_dist[i].keys()) | set(
sent_pos_dist[i + 1].keys()
)
difference = 0
totals = 0
for pos in common_adj_tags:
pos_dist_value = sent_pos_dist[i].get(pos, 0)
pos_dist_value_next = sent_pos_dist[i + 1].get(pos, 0)
difference += abs(pos_dist_value - pos_dist_value_next)
totals += pos_dist_value + pos_dist_value_next
        dissimilarity += difference / totals
    return dissimilarity / (len(sent_pos_dist) - 1)
def pos_distribution(doc):
"""Get POS distribution from a processed text.
Let us suppose that a given sentence has the following pos tags:
``[NOUN, VERB, ADJ, VERB, ADJ]``. The PoS distribution would be
* ``NOUN: 1``
* ``VERB: 2``
* ``ADJ: 2``
This function returns this distrubution as a dict.
:param doc: Processed text
:type doc: Spacy Doc
:return: POS distribution as a dict key POS, value Count
:rtype: dict
"""
distribution = {}
for token in doc:
distribution[token.pos_] = distribution.get(token.pos_, 0) + 1
return distribution
def pos_ratio(doc, pos_types):
"""Compute POS ratio given desired type of ratio.
The ``pos_types`` might be a regular expression if a composed ratio
is needed. An example of usage would be ``pos_ratio(doc, "VERB|AUX")``.
:param doc: Spacy processed text
:type doc: Spacy Doc
:param pos_types: POS to get the ratio
:type pos_types: string
:return: Ratio over number of words
:rtype: float
"""
pos_regex = re.compile(pos_types)
total_words = 0
total_pos_tags = 0
for token in doc:
if is_word(token):
total_words += 1
if pos_regex.search(token.pos_):
total_pos_tags += 1
return total_pos_tags / total_words
def sentence_count(doc):
"""Return number of sentences in a text.
:param doc: Text to be processed
:type doc: Spacy Doc
:return: Number of sentences in the text
:rtype: int
"""
return len(list(doc.sents))
def syllable_count(doc):
"""Return number of syllables of a text.
:param doc: Text to be processed.
:type doc: Spacy Doc
:return: Number of syllables in the text
:rtype: int
"""
return sum(
[
Syllabizer.number_of_syllables(token.lower_)
for token in doc
if token.pos_ != "PUNCT"
]
)
def syntactic_similarity(doc):
"""Compute average syntactic similarity between sentences.
For each pair of sentences, compute the similarity between each
pair of nodes, using :func:`TRUNAJOD.surface_proxies.node_similarity`
Then, the result is averaged over the ``N - 1`` pair of sentences.
:param doc: Processed text
:type doc: Spacy Doc
:return: Average syntactic similarity over sentences.
:rtype: float
"""
n_sentences = 0
prev_sent = None
aggregate_similarity = 0
for sent in doc.sents:
n_sentences += 1
if prev_sent is not None:
common_nodes = node_similarity(sent.root, prev_sent.root, True)
aggregate_similarity += common_nodes / (
len(sent) + len(prev_sent) - common_nodes
)
prev_sent = sent
return aggregate_similarity / (n_sentences - 1)
def syllable_word_ratio(doc):
"""Return average syllable word ratio.
It is computed as # Syllables / # of words.
:param doc: Text to be processed.
:type doc: Spacy Doc
:return: syllable word ratio
:rtype: float
"""
return syllable_count(doc) / word_count(doc)
def subordination(doc, infinitive_map):
"""Return subordination, defined as the clause density.
The subordination is defined as the ratio between # of clauses and
the # of sentences. To compute number of clauses, a heuristic is used.
:param doc: Text to be processed.
:type doc: Spacy Doc
    :param infinitive_map: Lexicon containing maps from conjugate to infinitive.
:type infinitive_map: dict
:return: Subordination index
:rtype: float
"""
return clause_count(doc, infinitive_map) / sentence_count(doc)
def verb_noun_ratio(doc):
"""Compute Verb/Noun ratio.
:param doc: Processed text
:type doc: Spacy Doc
:return: Verb Noun ratio
:rtype: float
"""
return pos_ratio(doc, "VERB|AUX") / pos_ratio(doc, "NOUN|PROPN")
def words_before_root(doc, max_depth=4):
"""Return average word count of words before root.
For each sentence, word count before root is computed in the case that the
root is a verb. Otherwise, the root is considered to be the verb in the
highest node in the parse tree.
:param doc: Text to be processed.
:type doc: Spacy Doc
:return: Average words before root
:rtype: float
"""
total_words_before_root = 0
total_roots = 0
for sent in doc.sents:
root_is_verb = False
verb_highest_node_found = False
root = None
for token in sent:
if token.dep_ == "ROOT":
root = token
if token.pos_ in {"VERB", "AUX"}:
root_is_verb = True
if not root_is_verb:
for depth in range(2, max_depth):
if not verb_highest_node_found:
for token in sent:
if token.pos_ in {"VERB", "AUX"}:
if get_word_depth(token.i, doc) == depth:
root = token
verb_highest_node_found = True
break
root_found = False
verb_root_found = root_is_verb or verb_highest_node_found
words_before_root = 0
for token in sent:
if token == root and verb_root_found:
root_found = True
total_roots += 1
total_words_before_root += words_before_root
else:
aux = token.pos_ != "PUNCT" and verb_root_found
if (not root_found) and aux:
words_before_root += 1
return total_words_before_root / total_roots
def word_count(doc):
"""Return number of words in a text.
:param doc: Text to be processed.
:type doc: Spacy Doc
:return: Word count
:rtype: int
"""
return sum([1 for token in doc if is_word(token)])
|
the-stack_0_23999 | import petl
import simpleeval
from ..step import Step
# NOTE:
# We need to review simpleeval perfomance for using it with row_filter
# Currently, metadata profiles are not fully finished; will require improvements
class row_filter(Step):
code = "row-filter"
def __init__(self, descriptor=None, *, formula=None, function=None):
self.setinitial("formula", formula)
self.setinitial("function", function)
super().__init__(descriptor)
# Transform
def transform_resource(self, resource):
formula = self.get("formula")
function = self.get("function")
if formula:
# NOTE: review EvalWithCompoundTypes/sync with checks
evalclass = simpleeval.EvalWithCompoundTypes
function = lambda row: evalclass(names=row).eval(formula)
yield from resource.to_petl().select(function)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": [],
"properties": {
"formula": {type: "string"},
"function": {},
},
}
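# Illustrative usage of the step above (the field name is hypothetical):
#   row_filter(formula="price > 100")
#   row_filter(function=lambda row: row["price"] > 100)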
class row_search(Step):
code = "row-search"
def __init__(self, descriptor=None, *, regex=None, field_name=None, negate=False):
self.setinitial("regex", regex)
self.setinitial("fieldName", field_name)
self.setinitial("negate", negate)
super().__init__(descriptor)
# Transform
def transform_resource(self, resource):
regex = self.get("regex")
field_name = self.get("fieldName")
negate = self.get("negate")
search = petl.searchcomplement if negate else petl.search
if field_name:
yield from search(resource.to_petl(), field_name, regex)
else:
yield from search(resource.to_petl(), regex)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["regex"],
"properties": {
"regex": {},
"fieldName": {"type": "string"},
"negate": {},
},
}
class row_slice(Step):
code = "row-slice"
def __init__(
self,
descriptor=None,
*,
start=None,
stop=None,
step=None,
head=None,
tail=None,
):
self.setinitial("start", start)
self.setinitial("stop", stop)
self.setinitial("step", step)
self.setinitial("head", head)
self.setinitial("tail", tail)
super().__init__(descriptor)
# Transform
def transform_resource(self, resource):
start = self.get("start")
stop = self.get("stop")
step = self.get("step")
head = self.get("head")
tail = self.get("tail")
if head:
yield from resource.to_petl().head(head)
elif tail:
yield from resource.to_petl().tail(tail)
else:
yield from resource.to_petl().rowslice(start, stop, step)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": [],
"properties": {
"start": {},
"stop": {},
"step": {},
"head": {},
"tail": {},
},
}
class row_sort(Step):
code = "row-sort"
def __init__(self, descriptor=None, *, field_names=None, reverse=False):
self.setinitial("fieldNames", field_names)
self.setinitial("reverse", reverse)
super().__init__(descriptor)
# Transform
def transform_resource(self, resource):
field_names = self.get("fieldNames")
reverse = self.get("reverse")
yield from resource.to_petl().sort(field_names, reverse=reverse)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["fieldNames"],
"properties": {
"fieldNames": {"type": "array"},
"reverse": {},
},
}
class row_split(Step):
code = "row-add"
def __init__(self, descriptor=None, *, pattern=None, field_name=None):
self.setinitial("pattern", pattern)
self.setinitial("fieldName", field_name)
super().__init__(descriptor)
# Transform
def transform_resource(self, resource):
pattern = self.get("pattern")
field_name = self.get("fieldName")
yield from resource.to_petl().splitdown(field_name, pattern)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["fieldName", "pattern"],
"properties": {
"fieldName": {"type": "string"},
"pattern": {"type": "string"},
},
}
class row_subset(Step):
code = "row-subset"
def __init__(self, descriptor=None, *, subset=None, field_name=None):
assert subset in ["conflicts", "distinct", "duplicates", "unique"]
self.setinitial("subset", subset)
self.setinitial("fieldName", field_name)
super().__init__(descriptor)
# Transform
def transform_resource(self, resource):
subset = self.get("subset")
field_name = self.get("fieldName")
if subset == "conflicts":
yield from resource.to_petl().conflicts(field_name)
elif subset == "distinct":
yield from resource.to_petl().distinct(field_name)
elif subset == "duplicates":
yield from resource.to_petl().duplicates(field_name)
elif subset == "unique":
yield from resource.to_petl().unique(field_name)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["subset"],
"properties": {
"subset": {"type": "string"},
"fieldName": {"type": "string"},
},
}
class row_ungroup(Step):
code = "row-ungroup"
def __init__(
self,
descriptor=None,
*,
selection=None,
group_name=None,
value_name=None,
):
assert selection in ["first", "last", "min", "max"]
self.setinitial("selection", selection)
self.setinitial("groupName", group_name)
self.setinitial("valueName", value_name)
super().__init__(descriptor)
def transform_resource(self, resource):
selection = self.get("selection")
group_name = self.get("groupName")
value_name = self.get("valueName")
function = getattr(petl, f"groupselect{selection}")
if selection in ["first", "last"]:
yield from function(resource.to_petl(), group_name)
else:
yield from function(resource.to_petl(), group_name, value_name)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["groupName", "selection"],
"properties": {
"selection": {"type": "string"},
"groupName": {"type": "string"},
"valueName": {"type": "string"},
},
}
|
the-stack_0_24000 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Finalcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test coinbase transactions return the correct categories.
Tests listtransactions, listsinceblock, and gettransaction.
"""
from test_framework.test_framework import FinalcoinTestFramework
from test_framework.util import (
assert_array_result
)
class CoinbaseCategoryTest(FinalcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def assert_category(self, category, address, txid, skip):
assert_array_result(self.nodes[0].listtransactions(skip=skip),
{"address": address},
{"category": category})
assert_array_result(self.nodes[0].listsinceblock()["transactions"],
{"address": address},
{"category": category})
assert_array_result(self.nodes[0].gettransaction(txid)["details"],
{"address": address},
{"category": category})
def run_test(self):
# Generate one block to an address
address = self.nodes[0].getnewaddress()
self.generatetoaddress(self.nodes[0], 1, address)
hash = self.nodes[0].getbestblockhash()
txid = self.nodes[0].getblock(hash)["tx"][0]
# Coinbase transaction is immature after 1 confirmation
self.assert_category("immature", address, txid, 0)
# Mine another 99 blocks on top
self.generate(self.nodes[0], 99)
# Coinbase transaction is still immature after 100 confirmations
self.assert_category("immature", address, txid, 99)
# Mine one more block
self.generate(self.nodes[0], 1)
# Coinbase transaction is now matured, so category is "generate"
self.assert_category("generate", address, txid, 100)
# Orphan block that paid to address
self.nodes[0].invalidateblock(hash)
# Coinbase transaction is now orphaned
self.assert_category("orphan", address, txid, 100)
if __name__ == '__main__':
CoinbaseCategoryTest().main()
|
the-stack_0_24001 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""adapted from rat-sql"""
import copy
import operator
import attr
@attr.s
class Hypothesis:
inference_state = attr.ib()
next_choices = attr.ib()
score = attr.ib(default=0)
choice_history = attr.ib(factory=list)
score_history = attr.ib(factory=list)
def beam_search(model, orig_item, preproc_item, beam_size, max_steps):
inference_state, next_choices = model.begin_inference(orig_item, preproc_item)
beam = [Hypothesis(inference_state, next_choices)]
finished = []
for step in range(max_steps):
# Check if all beams are finished
if len(finished) == beam_size:
break
candidates = []
# For each hypothesis, get possible expansions
# Score each expansion
for hyp in beam:
candidates += [(hyp, choice, choice_score.item(),
hyp.score + choice_score.item())
for choice, choice_score in hyp.next_choices]
# Keep the top K expansions
candidates.sort(key=operator.itemgetter(3), reverse=True)
candidates = candidates[:beam_size - len(finished)]
# Create the new hypotheses from the expansions
beam = []
for hyp, choice, choice_score, cum_score in candidates:
inference_state = hyp.inference_state.clone()
next_choices = inference_state.step(choice)
if next_choices is None:
finished.append(Hypothesis(
inference_state,
None,
cum_score,
hyp.choice_history + [choice],
hyp.score_history + [choice_score]))
else:
beam.append(
Hypothesis(inference_state, next_choices, cum_score,
hyp.choice_history + [choice],
hyp.score_history + [choice_score]))
finished.sort(key=operator.attrgetter('score'), reverse=True)
return finished
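# Note: every surviving hypothesis expands all of its next choices each step,
# so each iteration scores O(beam_size * n_choices) candidates and keeps the
# best (beam_size - len(finished)) of them for the next step.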
|
the-stack_0_24002 | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP66 (DER SIG).
Test that the DERSIG soft-fork activates at (regtest) height 1251.
"""
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import msg_block
from test_framework.p2p import P2PInterface
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
DERSIG_HEIGHT = 1251
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
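# e.g. 30 44 02 20 <32-byte R> 02 20 <32-byte S> <hashtype> for a typical
# 71-byte signature; unDERify() inserts a zero byte before the hashtype, so
# the declared <total len> no longer matches and strict DER checks fail.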
def unDERify(tx):
"""
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
"""
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
class BIP66Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'[email protected]',
'-par=1', # Use only one script thread to get the exact log msg for testing
]]
self.setup_clean_chain = True
self.rpc_timeout = 240
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_dersig_info(self, *, is_active):
assert_equal(self.nodes[0].getblockchaininfo()['softforks']['bip66'],
{
"active": is_active,
"height": DERSIG_HEIGHT,
"type": "buried",
},
)
def run_test(self):
peer = self.nodes[0].add_p2p_connection(P2PInterface())
self.test_dersig_info(is_active=False)
self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)]
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info("Test that a transaction with non-DER signature can still appear in a block")
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
self.nodeaddress, amount=1.0)
unDERify(spendtx)
spendtx.rehash()
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time)
block.nVersion = 2
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.test_dersig_info(is_active=False) # Not active as of current tip and next block does not need to obey rules
peer.send_and_ping(msg_block(block))
self.test_dersig_info(is_active=True) # Not active as of current tip, but next block must obey rules
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 3")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
block.nVersion = 2
block.rehash()
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000002)'.format(block.hash)]):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
block.nVersion = 3
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
self.nodeaddress, amount=1.0)
unDERify(spendtx)
spendtx.rehash()
# First we show that this tx is valid except for DERSIG by getting it
# rejected from the mempool for exactly that reason.
assert_equal(
[{'txid': spendtx.hash, 'allowed': False, 'reject-reason': 'non-mandatory-script-verify-flag (Non-canonical DER signature)'}],
self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0)
)
# Now we verify that a block with this transaction is also invalid.
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['CheckInputScripts on {} failed with non-mandatory-script-verify-flag (Non-canonical DER signature)'.format(block.vtx[-1].hash)]):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_txids[1], self.nodeaddress, amount=1.0)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.test_dersig_info(is_active=True) # Not active as of current tip, but next block must obey rules
peer.send_and_ping(msg_block(block))
self.test_dersig_info(is_active=True) # Active as of current tip
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
BIP66Test().main()
|
the-stack_0_24004 | from datetime import datetime, timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import PeriodIndex, period_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.tseries.offsets import BDay, Minute
DATE_RANGE = (date_range, "dti", datetime(2005, 1, 1), datetime(2005, 1, 10))
PERIOD_RANGE = (period_range, "pi", datetime(2005, 1, 1), datetime(2005, 1, 10))
TIMEDELTA_RANGE = (timedelta_range, "tdi", "1 day", "10 day")
all_ts = pytest.mark.parametrize(
"_index_factory,_series_name,_index_start,_index_end",
[DATE_RANGE, PERIOD_RANGE, TIMEDELTA_RANGE],
)
@pytest.fixture()
def _index_factory():
return period_range
@pytest.fixture
def create_index(_index_factory):
def _create_index(*args, **kwargs):
""" return the _index_factory created using the args, kwargs """
return _index_factory(*args, **kwargs)
return _create_index
# new test to check that all FutureWarning are triggered
def test_deprecating_on_loffset_and_base():
# GH 31809
idx = pd.date_range("2001-01-01", periods=4, freq="T")
df = pd.DataFrame(data=4 * [range(2)], index=idx, columns=["a", "b"])
with tm.assert_produces_warning(FutureWarning):
pd.Grouper(freq="10s", base=0)
with tm.assert_produces_warning(FutureWarning):
pd.Grouper(freq="10s", loffset="0s")
with tm.assert_produces_warning(FutureWarning):
df.groupby("a").resample("3T", base=0).sum()
with tm.assert_produces_warning(FutureWarning):
df.groupby("a").resample("3T", loffset="0s").sum()
with tm.assert_produces_warning(FutureWarning):
df.resample("3T", base=0).sum()
with tm.assert_produces_warning(FutureWarning):
df.resample("3T", loffset="0s").sum()
msg = "'offset' and 'base' cannot be present at the same time"
with tm.assert_produces_warning(FutureWarning):
with pytest.raises(ValueError, match=msg):
df.groupby("a").resample("3T", base=0, offset=0).sum()
@all_ts
@pytest.mark.parametrize("arg", ["mean", {"value": "mean"}, ["mean"]])
def test_resample_loffset_arg_type(frame, create_index, arg):
# GH 13218, 15002
df = frame
expected_means = [df.values[i : i + 2].mean() for i in range(0, len(df.values), 2)]
expected_index = create_index(df.index[0], periods=len(df.index) / 2, freq="2D")
# loffset coerces PeriodIndex to DateTimeIndex
if isinstance(expected_index, PeriodIndex):
expected_index = expected_index.to_timestamp()
expected_index += timedelta(hours=2)
expected = DataFrame({"value": expected_means}, index=expected_index)
with tm.assert_produces_warning(FutureWarning):
result_agg = df.resample("2D", loffset="2H").agg(arg)
if isinstance(arg, list):
expected.columns = pd.MultiIndex.from_tuples([("value", "mean")])
tm.assert_frame_equal(result_agg, expected)
@pytest.mark.parametrize(
"loffset", [timedelta(minutes=1), "1min", Minute(1), np.timedelta64(1, "m")]
)
def test_resample_loffset(loffset):
# GH 7687
rng = date_range("1/1/2000 00:00:00", "1/1/2000 00:13:00", freq="min")
s = Series(np.random.randn(14), index=rng)
with tm.assert_produces_warning(FutureWarning):
result = s.resample(
"5min", closed="right", label="right", loffset=loffset
).mean()
idx = date_range("1/1/2000", periods=4, freq="5min")
expected = Series(
[s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1),
)
tm.assert_series_equal(result, expected)
assert result.index.freq == Minute(5)
# from daily
dti = date_range(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D")
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample("w-sun").last()
business_day_offset = BDay()
with tm.assert_produces_warning(FutureWarning):
expected = ser.resample("w-sun", loffset=-business_day_offset).last()
assert result.index[0] - business_day_offset == expected.index[0]
def test_resample_loffset_upsample():
# GH 20744
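    # The label shift also applied when upsampling with a fill method such as
    # ffill, not only when aggregating.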
rng = date_range("1/1/2000 00:00:00", "1/1/2000 00:13:00", freq="min")
s = Series(np.random.randn(14), index=rng)
with tm.assert_produces_warning(FutureWarning):
result = s.resample(
"5min", closed="right", label="right", loffset=timedelta(minutes=1)
).ffill()
idx = date_range("1/1/2000", periods=4, freq="5min")
expected = Series([s[0], s[5], s[10], s[-1]], index=idx + timedelta(minutes=1))
tm.assert_series_equal(result, expected)
def test_resample_loffset_count():
# GH 12725
start_time = "1/1/2000 00:00:00"
rng = date_range(start_time, periods=100, freq="S")
ts = Series(np.random.randn(len(rng)), index=rng)
with tm.assert_produces_warning(FutureWarning):
result = ts.resample("10S", loffset="1s").count()
expected_index = date_range(start_time, periods=10, freq="10S") + timedelta(
seconds=1
)
expected = Series(10, index=expected_index)
tm.assert_series_equal(result, expected)
# Same issue should apply to .size() since it goes through
# same code path
with tm.assert_produces_warning(FutureWarning):
result = ts.resample("10S", loffset="1s").size()
tm.assert_series_equal(result, expected)
def test_resample_base():
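    # base shifted the origin of the resampling bins: with freq="5min" and
    # base=2, bin edges fall at :02, :07, :12, ... rather than :00, :05, :10,
    # hence the expected range starting at 23:57:00.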
rng = date_range("1/1/2000 00:00:00", "1/1/2000 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
with tm.assert_produces_warning(FutureWarning):
resampled = ts.resample("5min", base=2).mean()
exp_rng = date_range("12/31/1999 23:57:00", "1/1/2000 01:57", freq="5min")
tm.assert_index_equal(resampled.index, exp_rng)
def test_resample_float_base():
# GH25161
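    # Non-integer base values are exercised here: the fractional part moves
    # the bin origin by a fraction of the frequency's unit, so all three
    # timestamps land in one bin anchored at 16:17:43.51.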
dt = pd.to_datetime(
["2018-11-26 16:17:43.51", "2018-11-26 16:17:44.51", "2018-11-26 16:17:45.51"]
)
s = Series(np.arange(3), index=dt)
base = 17 + 43.51 / 60
with tm.assert_produces_warning(FutureWarning):
result = s.resample("3min", base=base).size()
expected = Series(
3, index=pd.DatetimeIndex(["2018-11-26 16:17:43.51"], freq="3min")
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("kind", ["period", None, "timestamp"])
@pytest.mark.parametrize("agg_arg", ["mean", {"value": "mean"}, ["mean"]])
def test_loffset_returns_datetimeindex(frame, kind, agg_arg):
# make sure passing loffset returns DatetimeIndex in all cases
# basic method taken from Base.test_resample_loffset_arg_type()
df = frame
expected_means = [df.values[i : i + 2].mean() for i in range(0, len(df.values), 2)]
expected_index = period_range(df.index[0], periods=len(df.index) / 2, freq="2D")
# loffset coerces PeriodIndex to DateTimeIndex
expected_index = expected_index.to_timestamp()
expected_index += timedelta(hours=2)
expected = DataFrame({"value": expected_means}, index=expected_index)
with tm.assert_produces_warning(FutureWarning):
result_agg = df.resample("2D", loffset="2H", kind=kind).agg(agg_arg)
if isinstance(agg_arg, list):
expected.columns = pd.MultiIndex.from_tuples([("value", "mean")])
tm.assert_frame_equal(result_agg, expected)
@pytest.mark.parametrize(
"start,end,start_freq,end_freq,base,offset",
[
("19910905", "19910909 03:00", "H", "24H", 10, "10H"),
("19910905", "19910909 12:00", "H", "24H", 10, "10H"),
("19910905", "19910909 23:00", "H", "24H", 10, "10H"),
("19910905 10:00", "19910909", "H", "24H", 10, "10H"),
("19910905 10:00", "19910909 10:00", "H", "24H", 10, "10H"),
("19910905", "19910909 10:00", "H", "24H", 10, "10H"),
("19910905 12:00", "19910909", "H", "24H", 10, "10H"),
("19910905 12:00", "19910909 03:00", "H", "24H", 10, "10H"),
("19910905 12:00", "19910909 12:00", "H", "24H", 10, "10H"),
("19910905 12:00", "19910909 12:00", "H", "24H", 34, "34H"),
("19910905 12:00", "19910909 12:00", "H", "17H", 10, "10H"),
("19910905 12:00", "19910909 12:00", "H", "17H", 3, "3H"),
("19910905 12:00", "19910909 1:00", "H", "M", 3, "3H"),
("19910905", "19910913 06:00", "2H", "24H", 10, "10H"),
("19910905", "19910905 01:39", "Min", "5Min", 3, "3Min"),
("19910905", "19910905 03:18", "2Min", "5Min", 3, "3Min"),
],
)
def test_resample_with_non_zero_base(start, end, start_freq, end_freq, base, offset):
# GH 23882
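    # The parametrization encodes the equivalence base=b <-> offset="<b><unit>",
    # where <unit> is the unit of the target frequency (e.g. base=10 with
    # freq "24H" corresponds to offset="10H").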
s = pd.Series(0, index=pd.period_range(start, end, freq=start_freq))
s = s + np.arange(len(s))
with tm.assert_produces_warning(FutureWarning):
result = s.resample(end_freq, base=base).mean()
result = result.to_timestamp(end_freq)
# test that the replacement argument 'offset' works
result_offset = s.resample(end_freq, offset=offset).mean()
result_offset = result_offset.to_timestamp(end_freq)
tm.assert_series_equal(result, result_offset)
# to_timestamp casts 24H -> D
result = result.asfreq(end_freq) if end_freq == "24H" else result
with tm.assert_produces_warning(FutureWarning):
expected = s.to_timestamp().resample(end_freq, base=base).mean()
if end_freq == "M":
# TODO: is non-tick the relevant characteristic? (GH 33815)
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
def test_resample_base_with_timedeltaindex():
# GH 10530
rng = timedelta_range(start="0s", periods=25, freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
with tm.assert_produces_warning(FutureWarning):
with_base = ts.resample("2s", base=5).mean()
without_base = ts.resample("2s").mean()
exp_without_base = timedelta_range(start="0s", end="25s", freq="2s")
exp_with_base = timedelta_range(start="5s", end="29s", freq="2s")
tm.assert_index_equal(without_base.index, exp_without_base)
tm.assert_index_equal(with_base.index, exp_with_base)
|