filename (string, lengths 13–19) | text (string, lengths 134–1.04M)
---|---|
the-stack_0_8653 | # Please Pass the coded messages
from itertools import combinations
def solution(l):
l.sort(reverse = True)
for i in reversed(range(1, len(l) + 1)):
for tup in combinations(l, i):
if sum(tup) % 3 == 0: return int(''.join(map(str, tup)))
return 0
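# A quick self-check sketch (values chosen here for illustration, not taken from the
# original kata's tests): the function should return the largest number, built from a
# subset of the digits, that is divisible by 3, or 0 if no such number exists.
if __name__ == "__main__":
    assert solution([3, 1, 4, 1]) == 4311
    assert solution([3, 1, 4, 1, 5, 9]) == 94311
    assert solution([]) == 0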
|
the-stack_0_8654 | """
Lexicon Plesk Provider
Author: Jens Reimann, 2018
API Docs: https://docs.plesk.com/en-US/onyx/api-rpc
"""
from __future__ import absolute_import
import logging
from collections import OrderedDict
import requests
from lexicon.providers.base import Provider as BaseProvider
try:
import xmltodict # optional dependency
except ImportError:
pass
LOGGER = logging.getLogger(__name__)
PLEX_URL_SUFFIX = "/enterprise/control/agent.php"
NAMESERVER_DOMAINS = []
def provider_parser(subparser):
"""Configure provider parser for Plesk"""
subparser.add_argument(
"--auth-username", help="specify username for authentication"
)
subparser.add_argument(
"--auth-password", help="specify password for authentication"
)
subparser.add_argument(
"--plesk-server", help="specify URL to the Plesk Web UI, including the port"
)
class Provider(BaseProvider):
"""Provider class for Plesk"""
def __init__(self, config):
super(Provider, self).__init__(config)
self.api_endpoint = self._get_provider_option("plesk_server")
if self.api_endpoint.endswith("/"):
self.api_endpoint = self.api_endpoint[:-1]
if not self.api_endpoint.endswith(PLEX_URL_SUFFIX):
self.api_endpoint += PLEX_URL_SUFFIX
self.site_name = self.domain
assert self.site_name is not None
self.domain_id = None
self.username = self._get_provider_option("auth_username")
assert self.username is not None
self.password = self._get_provider_option("auth_password")
assert self.password is not None
def __simple_request(self, rtype, operation, req):
response = self.__plesk_request({rtype: {operation: req}})[rtype][operation]
result = response["result"]
if isinstance(result, list):
for record in result:
if record["status"] == "error":
raise Exception(
"API returned at least one error: %s" % record["errtext"]
)
elif response["result"]["status"] == "error":
errcode = response["result"]["errcode"]
errtext = response["result"]["errtext"]
raise Exception("API returned error: %s (%s)" % (errcode, errtext))
return response
def __plesk_request(self, request):
headers = {}
headers["Content-type"] = "text/xml"
headers["HTTP_PRETTY_PRINT"] = "TRUE"
headers["HTTP_AUTH_LOGIN"] = self.username
headers["HTTP_AUTH_PASSWD"] = self.password
xml = xmltodict.unparse({"packet": request}, pretty=True)
LOGGER.debug("Request: %s", xml)
response = requests.post(
self.api_endpoint,
headers=headers,
data=xml,
auth=(self.username, self.password),
)
data = response.text
LOGGER.debug("Response: %s", data)
result = xmltodict.parse(data)
return result["packet"]
def __find_site(self):
return self.__simple_request(
"site",
"get",
OrderedDict([("filter", {"name": self.site_name}), ("dataset", {})]),
)["result"]["id"]
def _authenticate(self):
self.domain_id = self.__find_site()
if self.domain_id is None:
raise Exception("Domain not found")
def _create_record(self, rtype, name, content):
return self.__create_entry(rtype, name, content, None)
def _list_records(self, rtype=None, name=None, content=None):
entries = self.__find_dns_entries(rtype, name, content)
LOGGER.debug("list_records: %s", entries)
return entries
def _update_record(self, identifier, rtype=None, name=None, content=None):
if identifier is None:
entries = self.__find_dns_entries(rtype, name, None)
LOGGER.debug("Entries found: %s", entries)
if not entries:
raise Exception("No entry found for updating")
identifier = entries[0]["id"]
entry = self.__get_dns_entry(identifier)
ids = []
for an_entry in entries:
ids.append(an_entry["id"])
self.__delete_dns_records_by_id(ids)
else:
entry = self.__get_dns_entry(identifier)
self.__delete_dns_records_by_id([identifier])
assert entry is not None
LOGGER.debug("Updating: %s", entry)
if rtype:
entry["type"] = rtype
if name:
entry["host"] = name
if content:
entry["value"] = content
return self.__create_entry(
entry["type"], entry["host"], entry["value"], entry["opt"]
)
def __create_entry(self, rtype, host, value, opt):
entries = self.__find_dns_entries(rtype, self._fqdn_name(host), value)
if entries:
return True # already exists
self.__simple_request(
"dns",
"add_rec",
OrderedDict(
[
("site-id", self.domain_id),
("type", rtype),
("host", self._relative_name(host)),
("value", value),
("opt", opt),
]
),
)
return True
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
if identifier:
self.__delete_dns_records_by_id([identifier])
return True
entries = self.__find_dns_entries(rtype, self._fqdn_name(name), content)
ids = []
for entry in entries:
ids.append(entry["id"])
self.__delete_dns_records_by_id(ids)
return bool(ids)
def __get_dns_entry(self, identifier):
return self.__simple_request("dns", "get_rec", {"filter": {"id": identifier}})[
"result"
]["data"]
def __find_dns_entries(self, rtype=None, host=None, value=None):
LOGGER.debug("Searching for: %s, %s, %s", rtype, host, value)
if value and rtype and rtype in ["CNAME"]:
LOGGER.debug("CNAME transformation")
value = value.rstrip(".") + "."
if host:
host = self._fqdn_name(host)
result = self.__simple_request(
"dns", "get_rec", {"filter": {"site-id": self.domain_id}}
)
entries = []
for record in result["result"]:
LOGGER.debug("Record: %s", record)
if (rtype is not None) and (record["data"]["type"] != rtype):
LOGGER.debug(
"\tType doesn't match - expected: '%s', found: '%s'",
rtype,
record["data"]["type"],
)
continue
if (host is not None) and (record["data"]["host"] != host):
LOGGER.debug(
"\tHost doesn't match - expected: '%s', found: '%s'",
host,
record["data"]["host"],
)
continue
if (value is not None) and (record["data"]["value"] != value):
LOGGER.debug(
"\tValue doesn't match - expected: '%s', found: '%s'",
value,
record["data"]["value"],
)
continue
entry = {
"id": record["id"],
"type": record["data"]["type"],
"name": self._full_name(record["data"]["host"]),
"ttl": None,
"options": {},
}
if record["data"]["type"] in ["CNAME"]:
entry["content"] = record["data"]["value"].rstrip(".")
else:
entry["content"] = record["data"]["value"]
if record["data"]["type"] == "MX":
entry["options"]["mx"] = {"priority": int(record["data"]["opt"])}
entries.append(entry)
return entries
def __delete_dns_records_by_id(self, ids):
if not ids:
return
req = []
for i in ids:
req.append({"del_rec": {"filter": {"id": i}}})
self.__plesk_request({"dns": req})
def _request(self, action="GET", url="/", data=None, query_params=None):
# Helper _request is not used for Plesk provider
pass
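# Illustrative sketch of the XML packet that __plesk_request builds (the domain name
# and exact formatting are examples only, not real Plesk output):
#
#   import xmltodict
#   request = {"site": {"get": {"filter": {"name": "example.com"}, "dataset": {}}}}
#   print(xmltodict.unparse({"packet": request}, pretty=True))
#
# which yields roughly:
#
#   <?xml version="1.0" encoding="utf-8"?>
#   <packet>
#     <site>
#       <get>
#         <filter>
#           <name>example.com</name>
#         </filter>
#         <dataset></dataset>
#       </get>
#     </site>
#   </packet>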
|
the-stack_0_8655 | # -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev"
__date__ = "08 Mar 2016"
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import numpy as np
import xrt.backends.raycing as raycing
import xrt.backends.raycing.sources as rs
#import xrt.backends.raycing.apertures as ra
import xrt.backends.raycing.oes as roe
import xrt.backends.raycing.run as rr
import xrt.backends.raycing.materials as rm
import xrt.plotter as xrtp
import xrt.runner as xrtr
import xrt.backends.raycing.screens as rsc
showIn3D = False
mGold = rm.Material('Au', rho=19.3, kind='FZP')
E0, dE = 400, 5
def build_beamline(nrays=1e5):
beamLine = raycing.BeamLine(height=0)
# source=rs.GeometricSource(
# beamLine, 'GeometricSource', (0, 0, 0),
# nrays=nrays, distx='flat', dx=0.12, distz='flat', dz=0.12,
# dxprime=0, dzprime=0,
# distE='flat', energies=(E0-dE, E0+dE), polarization='horizontal')
rs.GeometricSource(
beamLine, 'GeometricSource', (0, 0, 0),
nrays=nrays, distx='annulus', dx=(0, 0.056),
dxprime=0, dzprime=0,
distE='flat', energies=(E0-dE, E0+dE), polarization='horizontal')
beamLine.fsm1 = rsc.Screen(beamLine, 'DiamondFSM1', (0, 10., 0))
# beamLine.fzp = roe.NormalFZP(beamLine, 'FZP', [0, 10., 0], pitch=np.pi/2,
# material=mGold, f=2., E=E0, N=50)
beamLine.fzp = roe.GeneralFZPin0YZ(
beamLine, 'FZP', [0, 10., 0], pitch=np.pi/2,
material=mGold, f1='inf', f2=(0, 0, 2.), E=E0, N=500, phaseShift=np.pi)
# source.dx = 2 * beamLine.fzp.rn[-1]
# source.dz = source.dx
beamLine.fzp.order = 1
beamLine.fsm2 = rsc.Screen(beamLine, 'DiamondFSM2', (0, 12., 0))
return beamLine
def run_process(beamLine, shineOnly1stSource=False):
beamSource = beamLine.sources[0].shine()
# beamLine.feFixedMask.propagate(beamSource)
beamFSM1 = beamLine.fsm1.expose(beamSource)
beamFZPglobal, beamFZPlocal = beamLine.fzp.reflect(beamSource)
beamFSM2 = beamLine.fsm2.expose(beamFZPglobal)
outDict = {'beamSource': beamSource, 'beamFSM1': beamFSM1,
'beamFZPglobal': beamFZPglobal,
'beamFZPlocal': beamFZPlocal,
'beamFSM2': beamFSM2}
if showIn3D:
beamLine.prepare_flow()
return outDict
rr.run_process = run_process
def define_plots(beamLine):
fwhmFormatStrE = '%.2f'
plots = []
# plot = xrtp.XYCPlot(
# 'beamFSM1', (1,), xaxis=xrtp.XYCAxis(r'$x$', r'$\mu$m'),
# yaxis=xrtp.XYCAxis(r'$z$', r'$\mu$m'), title='FSM1_E')
# plot.caxis.fwhmFormatStr = None
# plot.saveName = [plot.title + '.png', ]
# plots.append(plot)
#
plot = xrtp.XYCPlot(
'beamFZPlocal', (1, -1),
xaxis=xrtp.XYCAxis(r'$x$', r'$\mu$m', bins=512, ppb=1,
limits=[-12, 12]),
yaxis=xrtp.XYCAxis(r'$y$', r'$\mu$m', bins=512, ppb=1,
limits=[-12, 12]),
caxis='category',
title='localZ')
plot.caxis.fwhmFormatStr = None
plot.textPanel = plot.ax1dHistX.text(
0.5, 0.02, '', size=14, color='w', transform=plot.ax1dHistX.transAxes,
ha='center', va='bottom')
plots.append(plot)
plot = xrtp.XYCPlot(
'beamFZPlocal', (1, -1),
xaxis=xrtp.XYCAxis(r'$x$', r'$\mu$m', bins=512, ppb=1),
yaxis=xrtp.XYCAxis(r'$y$', r'$\mu$m', bins=512, ppb=1),
caxis='category',
title='localFull')
plot.caxis.fwhmFormatStr = None
plot.textPanel = plot.ax1dHistX.text(
0.5, 0.02, '', size=14, color='w', transform=plot.ax1dHistX.transAxes,
ha='center', va='bottom')
plots.append(plot)
plot = xrtp.XYCPlot(
'beamFSM2', (1,),
xaxis=xrtp.XYCAxis(r'$x$', r'nm', bins=256, ppb=1, limits=[-500, 500]),
yaxis=xrtp.XYCAxis(r'$z$', r'nm', bins=256, ppb=1, limits=[-500, 500]),
caxis='category',
title='FSM2_Es')
plot.caxis.fwhmFormatStr = fwhmFormatStrE
plot.fluxFormatStr = '%.2e'
plot.textPanel = plot.ax1dHistX.text(
0.5, 0.02, '', size=14, color='w', transform=plot.ax1dHistX.transAxes,
ha='center', va='bottom')
plots.append(plot)
return plots
def plot_generator(plots, beamLine):
nShifts = 8
phaseShifts = np.arange(0, nShifts, dtype=float) / nShifts * 2 * np.pi
strPhaseShifts = ('0', r'$\pi/4$', r'$\pi/2$', r'$3\pi/4$',
r'$\pi$', r'$5\pi/4$', r'$3\pi/2$', r'$7\pi/4$')
for iPhaseShift, (phaseShift, strPhaseShift) in\
enumerate(zip(phaseShifts, strPhaseShifts)):
beamLine.fzp.set_phase_shift(phaseShift)
for plot in plots:
plot.saveName = ['FZP-{0}{1}.png'.format(
plot.title, iPhaseShift)]
try:
plot.textPanel.set_text(u'phase shift = {0}'.format(
strPhaseShift))
except AttributeError:
pass
yield
def main():
beamLine = build_beamline()
if showIn3D:
beamLine.glow(scale=[100, 10, 100], centerAt='FZP',
colorAxis='xzprime')
return
plots = define_plots(beamLine)
xrtr.run_ray_tracing(plots, repeats=360, generator=plot_generator,
beamLine=beamLine, processes='half')
#this is necessary to use multiprocessing in Windows, otherwise the new Python
#contexts cannot be initialized:
if __name__ == '__main__':
main()
|
the-stack_0_8657 | import deserialize
import pytest
import mumoco
@pytest.fixture
def remote():
return mumoco.Remote("myName", "myUrl")
def test_default_values(remote):
assert remote.name == "myName"
assert remote.url == "myUrl"
assert remote.verify_ssl is True
assert remote.priority == 0
assert remote.force is False
assert remote.login is False
def test_remote_deserialize(remote):
data = {
"name": "myName",
"url": "myUrl",
}
temp: mumoco.Remote = deserialize.deserialize(mumoco.Remote, data)
assert temp == remote
|
the-stack_0_8658 | # -*- coding: utf-8 -*-
import mne
import os.path as op
raw_dir = '/brainstudio/MEG/metwo/metwo_101/181206/'
raw_files = ['metwo_101_7m_01_raw.fif',
'metwo_101_7m_02_raw.fif',
'metwo_101_04_raw.fif',
'metwo_101_03_raw.fif']
for file in raw_files:
file_path = op.join(raw_dir + file)
raw_info = mne.io.read_raw_fif(file_path, allow_maxshield=True)
events = mne.find_events(raw_info, mask=1)
print('Events for file %s:' % file)
print('This file had %d events.' % len(events))
|
the-stack_0_8659 | import redis
import os, time, multiprocess, logging, sys
import json
from compute import Config_ini
from compute.log import Log
from compute.file import get_algo_local_dir, get_population_dir
def get_logger(logger_name, log_file, level=logging.INFO):
l = logging.getLogger(logger_name)
formatter = logging.Formatter('%(message)s')
fileHandler = logging.FileHandler(log_file, mode='a')
fileHandler.setFormatter(formatter)
l.setLevel(level)
l.addHandler(fileHandler)
return logging.getLogger(logger_name)
class RedisLog(object):
MSG_TYPE = ['LOG', 'WRITE_FILE']
def __init__(self, name, db_ip, db_port):
pool = redis.ConnectionPool(host=db_ip, port=int(db_port), socket_connect_timeout=1)
r = redis.Redis(connection_pool=pool, db=1)
connection_flag = True
try:
r.ping()
except Exception as e:
connection_flag = False
Log.info('Failed to connect to redis, please exit manually. ip: %s, db_port: %s, errors: %s' % (db_ip, db_port, str(e)))
sys.exit()
if connection_flag:
Log.info('Connected to redis successfully...')
self.r = r
self.name = name
def info(self, info):
self._writedb('LOG', info)
def write_file(self, fdir, fname, data):
content = {'fdir': fdir, 'fname': fname, 'data': data}
self._writedb('WRITE_FILE', content)
def _writedb(self, msg_type, content):
assert msg_type in self.MSG_TYPE
v = {'name': self.name, 'type': msg_type, 'content': content}
v = json.dumps(v).encode('utf-8')
self.r.rpush('RUN', v)
def _readdb(self):
info = self.r.lpop('RUN')
if info is None:
return None
# print(info)
info = json.loads(info.decode('utf-8'))
return info
@staticmethod
def run_dispatch_service():
Log.info('Start to read message from Redis')
db_ip = Config_ini.log_server
db_port = Config_ini.log_server_port
alg_local_dir = get_algo_local_dir()
population_dir = get_population_dir()
def proc_func():
rdb = RedisLog('', db_ip, db_port)
log_dict = {}
log_dir = os.path.join(alg_local_dir, 'log')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
while True:
data = rdb._readdb()
if data is not None:
name, dtype, content = data['name'], data['type'], data['content']
Log.info('Redis is reading: name:%s, type:%s, content:%s' % (name, dtype, content))
if dtype == 'LOG':
# create logger.
if name not in log_dict:
log_file = os.path.join(log_dir, name)
logger = get_logger(name, log_file)
log_dict[name] = logger
logger = log_dict[name]
logger.info(content)
# print(content)
elif dtype == 'WRITE_FILE':
fdir, fname, fdata = content['fdir'], content['fname'], content['data']
if fdir == 'CACHE' or fdir == 'RESULTS':
fdir = population_dir
if not os.path.exists(fdir):
os.makedirs(fdir)
with open(os.path.join(fdir, fname), 'a+') as f:
f.write(fdata)
f.flush()
else:
assert 0
time.sleep(1)
proc = multiprocess.Process(target=proc_func)
proc.start()
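# Minimal usage sketch (hypothetical host/port; requires a reachable Redis server and,
# for the dispatch side, an initialized Config_ini):
#
#   log = RedisLog('worker-1', '127.0.0.1', 6379)
#   log.info('evaluation started')
#   log.write_file('CACHE', 'gen_0.txt', 'indi00000=0.9512\n')
#
# Each call rpush()es a JSON message such as
#   {"name": "worker-1", "type": "LOG", "content": "evaluation started"}
# onto the Redis list 'RUN'; RedisLog.run_dispatch_service() pops these messages and
# replays them into per-name log files or into files under the population directory.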
|
the-stack_0_8661 | # -*- coding: utf-8 -*-
"""
this module contains all usable variables for native python type.
"""
import datetime
import re
import six
from booleano.operations.operands.classes import Variable
from booleano.operations.operands.constants import String
from booleano.parser.symbol_table_builder import SymbolTableBuilder
variable_symbol_table_builder = SymbolTableBuilder()
@variable_symbol_table_builder.register(type(None))
@six.python_2_unicode_compatible
class NativeVariable(Variable):
"""
a generic Bindable item that will resolve from the context with
his given name. it shall be subclass for more specific operations, but
it work as is using the python type operations.
it can be lazy if the given context_name is a callable, in this case, the callable
will be called with the current context
"""
operations = {
"equality", # ==, !=
"inequality", # >, <, >=, <=
"boolean", # Logical values
}
def __init__(self, context_name):
self.evaluated = False
self.context_name = context_name
super(NativeVariable, self).__init__()
def to_python(self, context):
"""Return the value of the ``bool`` context item"""
self.evaluated = True
if callable(self.context_name):
return self.context_name(context)
return context[self.context_name]
def equals(self, value, context):
"""Does ``value`` equal this variable?"""
self.evaluated = True
if isinstance(value, (String, six.text_type)):
value = self._from_native_string(six.text_type(value))
return self.to_python(context) == value
def greater_than(self, value, context):
"""Does thes variable is greater than ``value``"""
self.evaluated = True
if isinstance(value, (String, six.text_type)):
value = self._from_native_string(six.text_type(value))
return self.to_python(context) > value
def less_than(self, value, context):
"""Does thes variable is lesser than ``value``"""
self.evaluated = True
if isinstance(value, (String, six.text_type)):
value = self._from_native_string(six.text_type(value))
return self.to_python(context) < value
def __call__(self, context):
"""Does this variable evaluate to True?"""
self.evaluated = True
return bool(self.to_python(context))
def _from_native_string(self, value):
"""
special case where a variable can interpret
a string as something else (a date, a duration?)
:param value:
:return:
"""
return value
def __str__(self):
"""Return the Unicode representation of this variable."""
return 'Scop variable for %s [%s]' % (self.context_name, self.__class__.__name__)
def __repr__(self):
"""Represent this variable."""
return '<Scop variable for %s [%s]>' % (self.context_name, self.__class__.__name__)
@variable_symbol_table_builder.register(list)
@variable_symbol_table_builder.register(tuple)
class NativeCollectionVariable(NativeVariable):
operations = {
"equality", # ==, !=
"inequality", # >, <, >=, <=
"boolean", # Logical values
"membership",
}
def belongs_to(self, value, context):
"""does this variable belong to (in) """
self.evaluated = True
return value in self.to_python(context)
def is_subset(self, value, context):
"""
a strict subset contains some elements, but not all.
(belongs_to can contain all elements)
:param value:
:param context:
:return:
"""
self.evaluated = True
cv = self.to_python(context)
return cv != value and value in cv
@variable_symbol_table_builder.register(int)
@variable_symbol_table_builder.register(float)
class NumberVariable(NativeVariable):
"""
a variable that allows comparing a **number** from the context
"""
@variable_symbol_table_builder.register(bool)
class BooleanVariable(NativeVariable):
"""
a variable that allows comparing a **boolean** from the context
"""
@variable_symbol_table_builder.register(six.text_type)
class StringVariable(NativeCollectionVariable):
"""
a variable that allows comparing a **string** from the context
"""
@variable_symbol_table_builder.register(set)
@variable_symbol_table_builder.register(frozenset)
class SetVariable(NativeCollectionVariable):
"""
a variable that allows comparing a **set** from the context
"""
def cast_val(self, value):
if not isinstance(value, set):
value = {value}
return value
def belongs_to(self, value, context):
"""does this variable belong to (in) """
self.evaluated = True
cv = self.to_python(context)
value = self.cast_val(value)
return value <= cv
def is_subset(self, value, context):
"""
a strict subset contains some elements, but not all.
(belongs_to can contain all elements)
:param value:
:param context:
:return:
"""
self.evaluated = True
cv = self.to_python(context)
value = self.cast_val(value)
return value < cv
class FormatableVariable(NativeVariable):
"""
a class that accepts an extra format in its constructor
"""
formats = []
def __init__(self, context_name, formats=None):
if isinstance(formats, six.text_type):
self.formats = (formats, )
elif formats is not None:
self.formats = formats
super(FormatableVariable, self).__init__(context_name)
@variable_symbol_table_builder.register(datetime.timedelta)
class DurationVariable(FormatableVariable):
"""
a variable that allows comparing a **duration** from the context (datetime.timedelta)
the comparison can be made with a string matching one of the following formats:
+ **days** d **hours** h **minutes** m **seconds** s
+ **days** days **hours** hours **minutes** minutes **seconds** seconds
i.e.:
+ duration > "15 d 7 h 8 m 19 s"
+ duration > "15d 24s"
"""
formats = [
(
r'^((?P<days>\d+?) ?d(ays)?)? *'
r'((?P<hours>\d+?) ?h(r|ours?)?)? *'
r'((?P<minutes>\d+?) ?m(inutes)?)? *'
r'((?P<seconds>\d+?) ?s(econds)?)? *$'
)
]
def __init__(self, context_name, formats=None):
super(DurationVariable, self).__init__(context_name, formats)
self.regexps = [
re.compile(regex) for regex in self.formats
]
def _from_native_string(self, value):
"""
parse a string as a duration using self.formats.
:param value: the duration as a string, matching one of the formats
:return: the timedelta object
:rtype: datetime.timedelta
"""
for regex in self.regexps:
match = regex.search(value)
if match:
res = {unit: int(val) for unit, val in match.groupdict().items() if val is not None}
if res:
return datetime.timedelta(**res)
raise ValueError("bad date format for %s: tied %r" % (value, self.formats))
@variable_symbol_table_builder.register(datetime.datetime)
class DateTimeVariable(FormatableVariable):
"""
a variable that allows comparing a **datetime** from the context (datetime.datetime)
the comparison can be made with a string matching one of the following formats:
- %d/%m/%Y %H:%M:%S
- %d-%m-%Y %H:%M:%S
- %Y/%m/%d %H:%M:%S
- %Y-%m-%d %H:%M:%S
or you can pass your own formats in the construction
.. code::
DateTimeVariable("context_name", formats=["%Y-%m-%d %H:%M:%S"])
"""
formats = (
"%d/%m/%Y %H:%M:%S",
"%d-%m-%Y %H:%M:%S",
"%Y/%m/%d %H:%M:%S",
"%Y-%m-%d %H:%M:%S",
)
def _from_native_string(self, value):
"""
parse a string as a date using self.formats.
:param value: the date as a string, matching one of the formats
:return: the datetime object
:rtype: datetime.datetime
"""
for format in self.formats:
try:
return datetime.datetime.strptime(value, format)
except ValueError:
pass
raise ValueError("bad date format for %s: tied %r" % (value, self.formats))
@variable_symbol_table_builder.register(datetime.date)
class DateVariable(DateTimeVariable):
"""
a variable that allows comparing a **date** from the context (datetime.date)
the comparison can be made with a string matching one of the following formats:
- %d/%m/%Y
- %d-%m-%Y
- %Y/%m/%d
- %Y-%m-%d
or you can pass your own formats in the construction
.. code::
DateVariable("context_name", formats=["%Y %m %d"])
"""
formats = (
"%d/%m/%Y",
"%d-%m-%Y",
"%Y/%m/%d",
"%Y-%m-%d",
)
def _from_native_string(self, value):
return super(DateVariable, self)._from_native_string(value).date()
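# A minimal sanity sketch of the variables above (the context keys and values are made
# up for illustration; string comparands are parsed through each class's `formats`):
if __name__ == "__main__":
    _ctx = {"age": 42, "last_seen": datetime.datetime(2018, 1, 5, 12, 0, 0)}
    assert NumberVariable("age").greater_than(40, _ctx)
    assert DateTimeVariable("last_seen").less_than("01/02/2018 00:00:00", _ctx)
    assert DurationVariable(lambda c: datetime.timedelta(days=2)).greater_than("1d 12h", {})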
|
the-stack_0_8662 | import handle_input as input
import game_flags as flags
import pygame as pg
class Food(pg.sprite.Sprite):
# Constructor
def __init__(self, pos=(-1, -1)):
# Call the parent class (Sprite) constructor
pg.sprite.Sprite.__init__(self)
size = (32, 32)
self.pos = pos
position = tuple([ele * 32 for ele in reversed(self.pos)])
self.rect = pg.Rect(position, size)
self.images = []
for food_img in flags.FOOD:
img, _ = input.load_image(flags.FOOD_TYPE, food_img)
self.images.append(img)
self.index = 0
self.image = self.images[self.index]
self.animation_time = 0.1
self.current_time = 0
self.animation_frames = 8
self.current_frame = 0
def update_time_dependent(self, dt):
"""
Updates the image of Sprite approximately every 0.1 second.
Args:
dt: Time elapsed between each frame.
"""
self.current_time += dt
if self.current_time >= self.animation_time:
self.current_time = 0
self.index = (self.index + 1) % len(self.images)
self.image = self.images[self.index]
def update_frame_dependent(self):
"""
Updates the image of Sprite every 8 frames (approximately every 0.13 seconds if the frame rate is 60).
"""
self.current_frame += 1
if self.current_frame >= self.animation_frames:
self.current_frame = 0
self.index = (self.index + 1) % len(self.images)
self.image = self.images[self.index]
def update(self, dt):
"""This is the method that's being called when 'all_sprites.update(dt)' is called."""
# Switch between the two update methods by commenting/uncommenting.
self.update_time_dependent(dt)
# self.update_frame_dependent()
|
the-stack_0_8663 | import argparse
parser = argparse.ArgumentParser()
parser.add_argument("a", help="alphabet size", type=int)
parser.add_argument("l", help="sequence length", type=int)
parser.add_argument("-name", help="name of output folder")
parser.add_argument("-data", help="path to input data",
type=str, required=True)
parser.add_argument(
"-cv", help="estimate lambdas using regularization with regularization parameter chosen with 10-fold crossvalidation", default=True)
import numpy as np
import scipy as sp
import itertools
import sys
import time
import scipy as sp
import itertools
import os
import math
import csv
import pandas as pd
import random as rd
import statistics
from scipy.sparse import csr_matrix, dia_matrix
from scipy.optimize import minimize
from scipy.special import comb
from scipy.spatial.distance import hamming
from scipy.sparse.linalg import LinearOperator
from scipy.sparse.linalg import cg
import vc_regression as vc
def str2bool(v):
return v in ("True", "true", "t", "1")
args = parser.parse_args()
args.cv = str2bool(args.cv)
a = args.a
l = args.l
if args.name == None:
args.name = "my_project"
outdir = args.name
if not os.path.exists(outdir):
os.makedirs(outdir)
# QC
if a**l > 5000000:
print("sequence space is to big!")
exit()
vc.preliminary_preparation(a, l)
data = pd.read_csv(args.data, header=None)
babel = ''
for i in range(len(data)):
babel += data[0][i]
alphabet = set(babel)
AA2N = dict([(sorted(alphabet)[i], i) for i in range(len(alphabet))])
N2AA = {v: k for k, v in AA2N.items()}
AA = list(AA2N.keys())
seqsAll = [''.join(seq) for seq in itertools.product(AA, repeat=l)]
pd.DataFrame(seqsAll).to_csv(
outdir + "/sequences.txt", header=None, index=None)
def seqAA2num(seq):
return [AA2N[seq[i]] for i in range(len(seq))]
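# Illustration (hypothetical alphabet): with sorted alphabet {'A','C','G','T'} the
# mapping is {'A': 0, 'C': 1, 'G': 2, 'T': 3}, so seqAA2num("GAT") == [2, 0, 3].
# The actual mapping depends on the characters present in the input data.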
####
seqs = [seqAA2num(data[0][i]) for i in range(len(data))]
tr = np.array([vc.seq2pos(seqs[i]) for i in range(len(seqs))])
if np.shape(seqs)[1] != l:
print("seqs file dimension incompatible!")
exit()
ys = np.array(data[1])
sig2s = np.array(data[2])
# vc.set_data_as_global_parameters(seqs, ys, sig2s)
# vc.construct_A_sparse()
# vc.construct_E_sparse()
vc.initialize_computation(seqs, ys, sig2s)
all_distance_class_Q = all(map(lambda x: x > 0, vc.N_d))
rhod = vc.rho_d.copy()
rhod[0] -= np.mean(sig2s)
lambdas_naive = sp.linalg.inv(vc.W_kd.T).dot(rhod)
lambdas_naive_positive_Q = all(map(lambda x: x > 0, lambdas_naive))
if args.cv is True:
print("estimating lambdas with regularization (regularization parameter chosen using 10-fold crossvalidation)...")
cv = True
elif not all_distance_class_Q:
print("certain distance classes missing from data, estimating lambdas using regularization (regularization parameter chosen with 10-fold crossvalidation)...")
cv = True
elif lambdas_naive_positive_Q:
print("estimating lambdas using least squares")
cv = False
else:
print("naive lambdas contain nonpositive values, estimating lambdas using regularization (regularization parameter chosen with 10-fold crossvalidation)...")
cv = True
betas = 10 ** np.arange(-2, 6, .5)
rownames = ["order_" + str(k) for k in range(l + 1)]
# Estimate lambdas using 10 fold crossvalidation
if cv is True:
out = vc.lambdaCV(ys, tr, sig2s, betas, 10)
beta_cv = out[1]
lda = vc.solve_lambda_single_beta(ys, tr, sig2s, beta_cv)
print("lambdas = ", str(lda))
else:
lda = lambdas_naive
print("lambdas = ", str(lda))
pd.DataFrame(lda, index=rownames).to_csv(outdir + "/lambdas.txt", header=None)
mks = [comb(l, k) * (a - 1)**k for k in range(l + 1)]
variance_components = np.array([lda[k] * mks[k] for k in range(1, l + 1)])
variance_components /= np.sum(variance_components)
print("variance components = ", str(variance_components))
pd.DataFrame(variance_components, index=rownames[1:]).to_csv(
outdir + "/variance_components.txt", header=None)
|
the-stack_0_8665 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='message_media_messages',
version='2.0.0',
description='The MessageMedia Messages API provides a number of endpoints for building powerful two-way messaging applications.',
long_description=long_description,
author='MessageMedia Developers',
author_email='[email protected]',
url='https://developers.messagemedia.com',
packages=find_packages(),
install_requires=[
'requests>=2.9.1, <3.0',
'jsonpickle>=0.7.1, <1.0',
'cachecontrol>=0.11.7, <1.0',
'python-dateutil>=2.5.3, <3.0'
]
)
|
the-stack_0_8666 | import sys
import json
import numpy as np
from flask import Flask, request, jsonify, make_response
# from flask import session
from flask import render_template, send_from_directory
from flask_cors import CORS
import lib.recommender_tools as rec_tools
from lib.recommender_data import RECCOMEND_DATA
from lib.tools import json_response, load_input
import lib.look_up_table as look_up_table
from gevent.pywsgi import WSGIServer
VERBOSE = True
REC_DATA = RECCOMEND_DATA()
app = Flask(__name__, static_url_path='')
CORS(app)
# set this bugger by default.
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route('/js/<path:path>')
def send_js(path):
# offer up the js and css files for consumption
return send_from_directory('templates/js', path)
@app.route('/css/<path:path>')
def send_css(path):
# offer up the js and css files for consumption
return send_from_directory('templates/css', path)
@app.route('/images/<path:path>')
def send_image(path):
# offer up the js and css files for consumption
return send_from_directory('templates/images', path)
@app.route('/', methods=['POST', 'GET'])
def page_test():
""" Sanity check for flask application (used in automated tests)
"""
# get user-name and access rights from IAM
html = "<h3>Hello world! 3</h3>"
return html
@app.route('/demo1', methods=['POST', 'GET'])
def initial_form():
""" Sanity check for flask application (used in automated tests)
"""
return render_template('demo_page.html', port=5000)  # 'port' was only defined in the commented-out __main__ block; hard-coded to match the WSGI server port below
@app.route('/example_form_interface', methods=['GET'])
def basic_demo2():
"""
"""
return render_template('basic_form_and_results.html')
@app.route('/list_searchable_parameters', methods=['GET'])
def list_searchable_parameters():
print('here', file=sys.stdout)
inputs = REC_DATA.list_input_keys_values()
print('inputs',inputs, file=sys.stdout)
targets = look_up_table.LOOK_UP_TABLE['campaign_objective']
print('targets', targets, file=sys.stdout)
return json_response({"inputs": inputs, "targets": targets})
@app.route('/recommend_sort_games', methods=['POST'])
def make_recommendation():
"""Based on the user's objective, this function selects matches and returns scores and meta data
"""
event_rates = ['click-through-event', 'first_dropped', 'impression']
# Load the input
json_dict = load_input(request)
#json_dict = ast.literal_eval(json_str)
if VERBOSE:
print('json_dict', json_dict, file=sys.stdout)
# beware campaign_objective also sent in
slice_parameters = json_dict #[{i: json_dict[i]} for i in json_dict if i != 'campaign_objective']
# set default objects if none given
objectives = json_dict.get('campaign_objective', look_up_table.LOOK_UP_TABLE['campaign_objective'])
if isinstance(objectives, list) is False:
objectives = [objectives]
print('objectives', objectives, file=sys.stdout)
# assure the objectives are reasonable
for obj in objectives:
assert obj in look_up_table.LOOK_UP_TABLE['campaign_objective']
# identify rows matching the input query params
matching_rows = REC_DATA.extract_data_slice(slice_parameters)
# summ all events for each line_item_id matching above results
gm_kys_view = REC_DATA.sum_events(
matching_rows, ['first_key'], event_rates)
# get a list of unique game ids
uniq_games = list(gm_kys_view.keys())
for game_id in uniq_games:
# calculate rates, and scores
gm_kys_view[game_id]['click_through_rate'] = REC_DATA.calculates_rates(gm_kys_view[game_id]['click-through-event'], gm_kys_view[game_id]['impression'])
gm_kys_view[game_id]['engagement_rate'] = REC_DATA.calculates_rates(gm_kys_view[game_id]['first_dropped'], gm_kys_view[game_id]['impression'])
# calculate the specific score for this game
gm_kys_view[game_id]['rec_scores'] = REC_DATA.calculate_score([gm_kys_view[game_id][obj] for obj in objectives])
# sort the games based on 'decreasing' score
ind_sort = np.argsort([gm_kys_view[game_id]['rec_scores'] for game_id in uniq_games])[::-1]
# generate a results list of score and games
rec_score = []
for i in ind_sort:
game_id = uniq_games[i]
# get all the additional feautures for this game
game_features = REC_DATA.extract_game_features(game_id=game_id)
rec_score.append({'game_id': game_id,
'score': gm_kys_view[game_id]['rec_scores'], 'game_features': game_features
})
if VERBOSE:
print('rec_score', rec_score, file=sys.stdout)
pass
return json_response(rec_score)
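# Illustrative request (the objective value and the extra filter key shown here are
# hypothetical; valid values come from the /list_searchable_parameters endpoint):
#
#   curl -X POST http://localhost:5000/recommend_sort_games \
#        -H "Content-Type: application/json" \
#        -d '{"campaign_objective": "click_through_rate", "country": "US"}'
#
# The response is a JSON list of {"game_id", "score", "game_features"} objects,
# sorted by decreasing score.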
@app.route('/get_data_dump', methods=['GET'])
def get_engine_output():
"""Returns a dictionary with all data used in the rec ending and their metadata."""
res = {"game_data": REC_DATA.data}
return json_response(res)
@app.route('/get_feature_dump', methods=['GET'])
def get_feature_output():
"""Returns a dictionary with all data used in the rec ending and their metadata."""
res = {"game_features": REC_DATA.game_features}
return json_response(res)
def create_app():
""" Constructor
Returns
-------
app : flask app
"""
return app
# if __name__ == "__main__":
# if len(sys.argv) > 1:
# port = int(sys.argv[1])
# else:
# port = 80
# # app = create_app(config=None)
# # , use_reloader=False) # remember to set debug to False
# app.run(host='0.0.0.0', port=port, debug=VERBOSE)
if __name__ == '__main__':
# Debug/Development
# app.run(debug=True, host="0.0.0.0", port="5000")
# Production
http_server = WSGIServer(('', 5000), app)
http_server.serve_forever()
|
the-stack_0_8668 | import functools
import json
import textwrap
import mongoengine
from .. import util
from .. import entities
from . import look, verb
import architext.strings as strings
class LobbyMenu(verb.Verb):
'''Helper class that has the method that shows the lobby menu'''
def show_lobby_menu(self):
out_message = ""
self.session.world_list_cache = self.get_worlds_list()
world_list = self.session.world_list_cache
if world_list:
out_message += _('Enter the number of the world you want to enter\n')
world_names_with_index = [f' {index: < 4} {world.name: <36} {world.get_connected_users()}{chr(128100)} by {world.creator.name} {"" if world.public else chr(128274)}' for index, world in enumerate(world_list)]
out_message += functools.reduce(lambda a, b: '{}\n{}'.format(a, b), world_names_with_index)
else:
out_message += _('There are no public or known private worlds on this server.')
out_message += '\n\n' + _(
'Options:\n'
' + to create a new world.\n'
' ? to see all available actions.'
)
self.session.send_to_client(out_message)
def get_worlds_list(self):
return list(filter(self.has_to_be_listed, entities.World.objects()))
def has_to_be_listed(self, world):
if world.public:
return True
elif world.creator == self.session.user:
return True
elif world in self.session.user.joined_worlds:
return True
else:
return False
class LobbyHelp(LobbyMenu):
command = '?'
verbtype = verb.LOBBYVERB
def process(self, message):
out_message = _(
'You can use these commands from the lobby:\n'
' + to create a new world.\n'
' - to delete one of your worlds.\n'
' r to reload and show the list of worlds.\n'
' * to deploy a public world snapshot.\n'
' > to import a world from text.\n'
' who to see who is connected right now.\n'
'\n'
'Enter the number of a world in the world list to go there.\n'
'Enter the invite code of a world to go there.'
)
self.session.send_to_client(out_message)
self.finish_interaction()
class GoToLobby(LobbyMenu):
command = _('exitworld')
permissions = verb.NOBOT
def process(self, message):
self.session.user.leave_world()
self.show_lobby_menu()
self.finish_interaction()
class JoinByInviteCode(LobbyMenu):
command = ''
verbtype = verb.LOBBYVERB
@classmethod
def has_world_id_format(cls, string):
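# A MongoDB ObjectId serializes to a 24-character hex string, which is what world
# invite codes look like, so any 24-character message is treated as one.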
return len(string.strip()) == 24
@classmethod
def can_process(cls, message, session):
if super().can_process(message, session) and cls.has_world_id_format(message):
return True
else:
return False
def process(self, message):
try:
chosen_world = entities.World.objects.get(id=message)
except entities.World.DoesNotExist:
self.session.send_to_client(_("I don't understand that"))
self.finish_interaction()
return
self.session.user.enter_world(chosen_world)
self.session.send_to_client(_("Traveling to {world_name}.").format(world_name=chosen_world.name))
look.Look(self.session).show_current_room(show_world_name=True)
self.session.send_to_others_in_room(_("Pof! {player_name} appears here.").format(player_name=self.session.user.name))
self.finish_interaction()
class EnterWorld(LobbyMenu):
command = ''
verbtype = verb.LOBBYVERB
@classmethod
def can_process(self, message, session):
if super().can_process(message, session) and message.isnumeric():
return True
else:
return False
def __init__(self, session):
super().__init__(session)
self.current_process_function = self.process_world_number
def process(self, message):
self.current_process_function(message)
def process_world_number(self, message):
try:
index = int(message)
except ValueError:
self.session.send_to_client(strings.not_a_number)
self.finish_interaction()
return
try:
chosen_world = self.session.world_list_cache[index]
except IndexError:
self.session.send_to_client(strings.wrong_value)
self.finish_interaction()
return
try:
location_save = self.session.user.get_location_save(chosen_world)
self.session.user.enter_world(chosen_world)
except mongoengine.errors.DoesNotExist:
self.session.send_to_client(_("This world no longer exists. Enter 'r' to reload the lobby."))
self.finish_interaction()
return
self.session.send_to_client(_('{body}')
.format(
body=_('Returning to your last location there.') if location_save is not None else _('Going there for the first time!')
))
look.Look(self.session).show_current_room(show_world_name=True)
self.session.send_to_others_in_room(_("Puufh! {player_name} appears here.").format(player_name=self.session.user.name))
self.finish_interaction()
class RefreshLobby(LobbyMenu):
verbtype = verb.LOBBYVERB
command = 'r'
def process(self, message):
self.show_lobby_menu()
self.finish_interaction()
class CreateWorld(LobbyMenu):
verbtype = verb.LOBBYVERB
command = '+'
def process(self, message):
starting_room = entities.Room(
name=_('The First of Many Rooms'),
alias='0',
description=_(
'This is the first room of your world. Here you are the Architext!\n'
'\n'
'If you don\'t know where to start, just type "help building". There you\'ll find all you need to know to build any kind of world.\n'
'\n'
'Remember that you can type "worldinfo" to see the world\'s invite code.'
)
)
self.new_world = entities.World(save_on_creation=False, creator=self.session.user, starting_room=starting_room)
self.session.send_to_client(_('Enter the name for your new world. ("/" to cancel)'))
self.process = self.process_word_name
def process_word_name(self, message):
if message == "/":
self.session.send_to_client(strings.cancelled)
self.finish_interaction()
return
if not message:
self.session.send_to_client(strings.is_empty)
return
self.new_world.name = message
self.new_world.save()
self.session.send_to_client(_(
'┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n'
'┃ Your new world is ready ┃\n'
'┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n'
'It is a private world 🔒. You can invite your friends sharing this invite code:\n'
'\n'
'{invite_code}\n'
'\n'
'When it is ready, you can make the world public using the editworld command.\n'
'\n'
'Press enter to see your new world...'
).format(invite_code=self.new_world.id))
self.process = self.enter_to_continue
def enter_to_continue(self, message):
self.session.user.enter_world(self.new_world)
look.Look(self.session).show_current_room(show_world_name=True)
self.finish_interaction()
class DeployPublicSnapshot(LobbyMenu):
verbtype = verb.LOBBYVERB
command = '*'
def process(self, message):
self.public_snapshots = entities.WorldSnapshot.objects(public=True)
if not self.public_snapshots:
self.session.send_to_client(_('There are no public worlds to deploy.'))
self.finish_interaction()
return
message = _('Which world do you want to deploy? ("/" to cancel)\n')
for index, snapshot in enumerate(self.public_snapshots):
message += '{}. {}\n'.format(index, snapshot.name)
self.session.send_to_client(message)
self.process = self.process_menu_option
def process_menu_option(self, message):
if message == '/':
self.session.send_to_client(strings.cancelled)
self.show_lobby_menu()
self.finish_interaction()
return
try:
index = int(message)
if index < 0:
raise ValueError
except ValueError:
self.session.send_to_client(strings.not_a_number)
return
try:
self.chosen_snapshot = self.public_snapshots[index]
except IndexError:
self.session.send_to_client(strings.wrong_value)
return
self.session.send_to_client(_('How do you want to name the new world? ("/" to cancel)'))
self.process = self.process_new_world_name
def process_new_world_name(self, message):
if message == "/":
self.session.send_to_client(strings.cancelled)
self.finish_interaction()
return
if not message:
self.session.send_to_client(strings.is_empty)
return
world_name = message
self.deploy_at_new_world(self.chosen_snapshot, world_name)
self.session.send_to_client(_('Done.'))
self.show_lobby_menu()
self.finish_interaction()
def deploy_at_new_world(self, snapshot, world_name):
snapshot_instance = snapshot.snapshoted_state.clone()
new_world = entities.World(creator=self.session.user, world_state=snapshot_instance, name=world_name)
class DeleteWorld(LobbyMenu):
verbtype = verb.LOBBYVERB
command = '-'
def process(self, message):
self.your_worlds = entities.World.objects(creator=self.session.user)
if not self.your_worlds:
self.session.send_to_client(_("You have not created any world."))
self.finish_interaction()
return
message = _('Choose the world to delete. YOU WON\'T BE ABLE TO GET IT BACK. Consider making a backup first. ("/" to cancel)\n')
for index, world in enumerate(self.your_worlds):
message += "{}. {}\n".format(index, world.name)
self.session.send_to_client(message)
self.process = self.process_menu_option
def process_menu_option(self, message):
if message == '/':
self.session.send_to_client(strings.cancelled)
self.show_lobby_menu()
self.finish_interaction()
return
try:
index = int(message)
if index < 0:
raise ValueError
except ValueError:
self.session.send_to_client(strings.not_a_number)
return
try:
world_to_delete = self.your_worlds[index]
except IndexError:
self.session.send_to_client(strings.wrong_value)
return
try:
world_to_delete.delete()
except entities.CantDelete as e:
self.session.send_to_client(_("It can not be deleted: {error}".format(error=e)))
else:
self.session.send_to_client(_("Done."))
self.show_lobby_menu()
self.finish_interaction()
class ImportWorld(LobbyMenu):
verbtype = verb.LOBBYVERB
command = '>'
def process(self, message):
self.json_message = ''
self.world_name = ''
self.session.send_to_client(_('Enter a name for your new world. ("/" to cancel)'))
self.process = self.process_word_name
def process_word_name(self, message):
if message == "/":
self.session.send_to_client(strings.cancelled)
self.finish_interaction()
return
if not message:
self.session.send_to_client(strings.is_empty)
return
self.world_name = message
self.session.send_to_client(_(
'Now paste the text export of the world.\n'
'It will be automatically divided into multiple messages if it is too long.\n'
'The server won\'t consider the text as received until it is valid.\n'
'If you entered the wrong text, send "/" to cancel.'
))
self.process = self.process_world_json
def process_world_json(self, message):
# todo: check for possible risks and outcomes of bad input.
if message == '/':
self.session.send_to_client(strings.cancelled)
self.show_lobby_menu()
self.finish_interaction()
return
self.session.send_to_client(_("{char_number} chars received").format(char_number=len(message)))
message_valid = False
message_without_control_characters = util.remove_control_characters(message)
self.json_message += message_without_control_characters
self.session.send_to_client(_('Parsing your message. Please wait...'))
world_dict = util.text_to_world_dict(self.json_message)
if world_dict is not None:
new_world = util.world_from_dict(world_dict, self.world_name, self.session.user)
self.session.send_to_client(_('Your new world is ready. The items in all player inventories from the original world have been moved to your inventory.'))
self.show_lobby_menu()
self.finish_interaction()
else:
self.session.send_to_client(_('The text is still invalid. Waiting for more characters. ("/" to cancel)'))
|
the-stack_0_8669 | # Copyright 2016-2021, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import time
import threading
from concurrent import futures
from enum import Enum
from datetime import datetime
from typing import List, Any, Mapping, MutableMapping, Optional, Callable, Tuple
import grpc
from ._cmd import CommandResult, _run_pulumi_cmd, OnOutput
from ._config import ConfigValue, ConfigMap
from .errors import StackAlreadyExistsError
from .events import OpMap, EngineEvent, SummaryEvent
from ._output import OutputMap
from ._server import LanguageServer
from ._workspace import Workspace, PulumiFn, Deployment
from ..runtime.settings import _GRPC_CHANNEL_OPTIONS
from ..runtime.proto import language_pb2_grpc
from ._representable import _Representable
_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
OnEvent = Callable[[EngineEvent], Any]
class ExecKind(str, Enum):
LOCAL = "auto.local"
INLINE = "auto.inline"
class StackInitMode(Enum):
CREATE = "create"
SELECT = "select"
CREATE_OR_SELECT = "create_or_select"
class UpdateSummary:
def __init__(self,
# pre-update info
kind: str,
start_time: datetime,
message: str,
environment: Mapping[str, str],
config: Mapping[str, dict],
# post-update info
result: str,
end_time: datetime,
version: Optional[int] = None,
deployment: Optional[str] = None,
resource_changes: Optional[OpMap] = None):
self.kind = kind
self.start_time = start_time
self.end_time = end_time
self.message = message
self.environment = environment
self.result = result
self.Deployment = deployment
self.resource_changes = resource_changes
self.version = version
self.config: ConfigMap = {}
for key in config:
config_value = config[key]
self.config[key] = ConfigValue(value=config_value["value"], secret=config_value["secret"])
def __repr__(self):
return f"UpdateSummary(result={self.result!r}, version={self.version!r}, " \
f"start_time={self.start_time!r}, end_time={self.end_time!r}, kind={self.kind!r}, " \
f"message={self.message!r}, environment={self.environment!r}, " \
f"resource_changes={self.resource_changes!r}, config={self.config!r}, Deployment={self.Deployment!r})"
class BaseResult(_Representable):
def __init__(self, stdout: str, stderr: str):
self.stdout = stdout
self.stderr = stderr
class PreviewResult(BaseResult):
def __init__(self, stdout: str, stderr: str, change_summary: OpMap):
super().__init__(stdout, stderr)
self.change_summary = change_summary
class UpResult(BaseResult):
def __init__(self, stdout: str, stderr: str, summary: UpdateSummary, outputs: OutputMap):
super().__init__(stdout, stderr)
self.outputs = outputs
self.summary = summary
class RefreshResult(BaseResult):
def __init__(self, stdout: str, stderr: str, summary: UpdateSummary):
super().__init__(stdout, stderr)
self.summary = summary
class DestroyResult(BaseResult):
def __init__(self, stdout: str, stderr: str, summary: UpdateSummary):
super().__init__(stdout, stderr)
self.summary = summary
class Stack:
@classmethod
def create(cls, stack_name: str, workspace: Workspace) -> 'Stack':
"""
Creates a new stack using the given workspace, and stack name.
It fails if a stack with that name already exists.
:param stack_name: The name identifying the Stack
:param workspace: The Workspace the Stack was created from.
:return: Stack
"""
return Stack(stack_name, workspace, StackInitMode.CREATE)
@classmethod
def select(cls, stack_name: str, workspace: Workspace) -> 'Stack':
"""
Selects stack using the given workspace, and stack name.
It returns an error if the given Stack does not exist.
:param stack_name: The name identifying the Stack
:param workspace: The Workspace the Stack was created from.
:return: Stack
"""
return Stack(stack_name, workspace, StackInitMode.SELECT)
@classmethod
def create_or_select(cls, stack_name: str, workspace: Workspace) -> 'Stack':
"""
Tries to create a new stack using the given workspace and stack name if the stack does not already exist,
or falls back to selecting the existing stack. If the stack does not exist,
it will be created and selected.
:param stack_name: The name identifying the Stack
:param workspace: The Workspace the Stack was created from.
:return: Stack
"""
return Stack(stack_name, workspace, StackInitMode.CREATE_OR_SELECT)
def __init__(self, name: str, workspace: Workspace, mode: StackInitMode) -> None:
"""
Stack is an isolated, independently configurable instance of a Pulumi program.
Stack exposes methods for the full pulumi lifecycle (up/preview/refresh/destroy), as well as managing configuration.
Multiple Stacks are commonly used to denote different phases of development
(such as development, staging and production) or feature branches (such as feature-x-dev, jane-feature-x-dev).
"""
self.name = name
self.workspace = workspace
self._mode = mode
if not isinstance(name, str):
raise TypeError("name must be of type 'str'")
if not isinstance(workspace, Workspace):
raise TypeError("workspace must be of type 'Workspace'")
if not isinstance(mode, StackInitMode):
raise TypeError("mode must be of type 'StackInitMode'")
if mode is StackInitMode.CREATE:
workspace.create_stack(name)
elif mode is StackInitMode.SELECT:
workspace.select_stack(name)
elif mode is StackInitMode.CREATE_OR_SELECT:
try:
workspace.create_stack(name)
except StackAlreadyExistsError:
workspace.select_stack(name)
def __repr__(self):
return f"Stack(stack_name={self.name!r}, workspace={self.workspace!r}, mode={self._mode!r})"
def __str__(self):
return f"Stack(stack_name={self.name!r}, workspace={self.workspace!r})"
def up(self,
parallel: Optional[int] = None,
message: Optional[str] = None,
target: Optional[List[str]] = None,
expect_no_changes: Optional[bool] = None,
diff: Optional[bool] = None,
target_dependents: Optional[bool] = None,
replace: Optional[List[str]] = None,
on_output: Optional[OnOutput] = None,
on_event: Optional[OnEvent] = None,
program: Optional[PulumiFn] = None) -> UpResult:
"""
Creates or updates the resources in a stack by executing the program in the Workspace.
https://www.pulumi.com/docs/reference/cli/pulumi_up/
:param parallel: Parallel is the number of resource operations to run in parallel at once.
(1 for no parallelism). Defaults to unbounded (2147483647).
:param message: Message (optional) to associate with the update operation.
:param target: Specify an exclusive list of resource URNs to update.
:param expect_no_changes: Return an error if any changes occur during this update.
:param diff: Display operation as a rich diff showing the overall change.
:param target_dependents: Allows updating of dependent targets discovered but not specified in the Target list.
:param replace: Specify resources to replace.
:param on_output: A function to process the stdout stream.
:param on_event: A function to process structured events from the Pulumi event stream.
:param program: The inline program.
:returns: UpResult
"""
# Disable unused-argument because pylint doesn't understand we process them in _parse_extra_args
# pylint: disable=unused-argument
program = program or self.workspace.program
extra_args = _parse_extra_args(**locals())
args = ["up", "--yes", "--skip-preview"]
args.extend(extra_args)
kind = ExecKind.LOCAL.value
on_exit = None
if program:
kind = ExecKind.INLINE.value
server = grpc.server(futures.ThreadPoolExecutor(max_workers=4), # pylint: disable=consider-using-with
options=_GRPC_CHANNEL_OPTIONS)
language_server = LanguageServer(program)
language_pb2_grpc.add_LanguageRuntimeServicer_to_server(language_server, server)
port = server.add_insecure_port(address="0.0.0.0:0")
server.start()
def on_exit_fn():
language_server.on_pulumi_exit()
server.stop(0)
on_exit = on_exit_fn
args.append(f"--client=127.0.0.1:{port}")
args.extend(["--exec-kind", kind])
log_watcher_thread = None
temp_dir = None
if on_event:
log_file, temp_dir = _create_log_file("up")
args.extend(["--event-log", log_file])
log_watcher_thread = threading.Thread(target=_watch_logs, args=(log_file, on_event))
log_watcher_thread.start()
try:
up_result = self._run_pulumi_cmd_sync(args, on_output)
outputs = self.outputs()
summary = self.info()
assert summary is not None
finally:
_cleanup(temp_dir, log_watcher_thread, on_exit)
return UpResult(stdout=up_result.stdout, stderr=up_result.stderr, summary=summary, outputs=outputs)
def preview(self,
parallel: Optional[int] = None,
message: Optional[str] = None,
target: Optional[List[str]] = None,
expect_no_changes: Optional[bool] = None,
diff: Optional[bool] = None,
target_dependents: Optional[bool] = None,
replace: Optional[List[str]] = None,
on_output: Optional[OnOutput] = None,
on_event: Optional[OnEvent] = None,
program: Optional[PulumiFn] = None) -> PreviewResult:
"""
Performs a dry-run update to a stack, returning pending changes.
https://www.pulumi.com/docs/reference/cli/pulumi_preview/
:param parallel: Parallel is the number of resource operations to run in parallel at once.
(1 for no parallelism). Defaults to unbounded (2147483647).
:param message: Message to associate with the preview operation.
:param target: Specify an exclusive list of resource URNs to update.
:param expect_no_changes: Return an error if any changes occur during this update.
:param diff: Display operation as a rich diff showing the overall change.
:param target_dependents: Allows updating of dependent targets discovered but not specified in the Target list.
:param replace: Specify resources to replace.
:param on_output: A function to process the stdout stream.
:param on_event: A function to process structured events from the Pulumi event stream.
:param program: The inline program.
:returns: PreviewResult
"""
# Disable unused-argument because pylint doesn't understand we process them in _parse_extra_args
# pylint: disable=unused-argument
program = program or self.workspace.program
extra_args = _parse_extra_args(**locals())
args = ["preview"]
args.extend(extra_args)
kind = ExecKind.LOCAL.value
on_exit = None
if program:
kind = ExecKind.INLINE.value
server = grpc.server(futures.ThreadPoolExecutor(max_workers=4), # pylint: disable=consider-using-with
options=_GRPC_CHANNEL_OPTIONS)
language_server = LanguageServer(program)
language_pb2_grpc.add_LanguageRuntimeServicer_to_server(language_server, server)
port = server.add_insecure_port(address="0.0.0.0:0")
server.start()
def on_exit_fn():
language_server.on_pulumi_exit()
server.stop(0)
on_exit = on_exit_fn
args.append(f"--client=127.0.0.1:{port}")
args.extend(["--exec-kind", kind])
log_file, temp_dir = _create_log_file("preview")
args.extend(["--event-log", log_file])
summary_events: List[SummaryEvent] = []
def on_event_callback(event: EngineEvent) -> None:
if event.summary_event:
summary_events.append(event.summary_event)
if on_event:
on_event(event)
# Start watching logs in a thread
log_watcher_thread = threading.Thread(target=_watch_logs, args=(log_file, on_event_callback))
log_watcher_thread.start()
try:
preview_result = self._run_pulumi_cmd_sync(args, on_output)
finally:
_cleanup(temp_dir, log_watcher_thread, on_exit)
if not summary_events:
raise RuntimeError("summary event never found")
return PreviewResult(stdout=preview_result.stdout,
stderr=preview_result.stderr,
change_summary=summary_events[0].resource_changes)
def refresh(self,
parallel: Optional[int] = None,
message: Optional[str] = None,
target: Optional[List[str]] = None,
expect_no_changes: Optional[bool] = None,
on_output: Optional[OnOutput] = None,
on_event: Optional[OnEvent] = None) -> RefreshResult:
"""
Compares the current stack’s resource state with the state known to exist in the actual
cloud provider. Any such changes are adopted into the current stack.
:param parallel: Parallel is the number of resource operations to run in parallel at once.
(1 for no parallelism). Defaults to unbounded (2147483647).
:param message: Message (optional) to associate with the refresh operation.
:param target: Specify an exclusive list of resource URNs to refresh.
:param expect_no_changes: Return an error if any changes occur during this update.
:param on_output: A function to process the stdout stream.
:param on_event: A function to process structured events from the Pulumi event stream.
:returns: RefreshResult
"""
# Disable unused-argument because pylint doesn't understand we process them in _parse_extra_args
# pylint: disable=unused-argument
extra_args = _parse_extra_args(**locals())
args = ["refresh", "--yes", "--skip-preview"]
args.extend(extra_args)
kind = ExecKind.INLINE.value if self.workspace.program else ExecKind.LOCAL.value
args.extend(["--exec-kind", kind])
log_watcher_thread = None
temp_dir = None
if on_event:
log_file, temp_dir = _create_log_file("refresh")
args.extend(["--event-log", log_file])
log_watcher_thread = threading.Thread(target=_watch_logs, args=(log_file, on_event))
log_watcher_thread.start()
try:
refresh_result = self._run_pulumi_cmd_sync(args, on_output)
finally:
_cleanup(temp_dir, log_watcher_thread)
summary = self.info()
assert summary is not None
return RefreshResult(stdout=refresh_result.stdout, stderr=refresh_result.stderr, summary=summary)
def destroy(self,
parallel: Optional[int] = None,
message: Optional[str] = None,
target: Optional[List[str]] = None,
target_dependents: Optional[bool] = None,
on_output: Optional[OnOutput] = None,
on_event: Optional[OnEvent] = None) -> DestroyResult:
"""
Destroy deletes all resources in a stack, leaving all history and configuration intact.
:param parallel: Parallel is the number of resource operations to run in parallel at once.
(1 for no parallelism). Defaults to unbounded (2147483647).
:param message: Message (optional) to associate with the destroy operation.
:param target: Specify an exclusive list of resource URNs to destroy.
:param target_dependents: Allows updating of dependent targets discovered but not specified in the Target list.
:param on_output: A function to process the stdout stream.
:param on_event: A function to process structured events from the Pulumi event stream.
:returns: DestroyResult
"""
# Disable unused-argument because pylint doesn't understand we process them in _parse_extra_args
# pylint: disable=unused-argument
extra_args = _parse_extra_args(**locals())
args = ["destroy", "--yes", "--skip-preview"]
args.extend(extra_args)
kind = ExecKind.INLINE.value if self.workspace.program else ExecKind.LOCAL.value
args.extend(["--exec-kind", kind])
log_watcher_thread = None
temp_dir = None
if on_event:
log_file, temp_dir = _create_log_file("destroy")
args.extend(["--event-log", log_file])
log_watcher_thread = threading.Thread(target=_watch_logs, args=(log_file, on_event))
log_watcher_thread.start()
try:
destroy_result = self._run_pulumi_cmd_sync(args, on_output)
finally:
_cleanup(temp_dir, log_watcher_thread)
summary = self.info()
assert summary is not None
return DestroyResult(stdout=destroy_result.stdout, stderr=destroy_result.stderr, summary=summary)
def get_config(self, key: str) -> ConfigValue:
"""
Returns the config value associated with the specified key.
:param key: The key for the config item to get.
:returns: ConfigValue
"""
return self.workspace.get_config(self.name, key)
def get_all_config(self) -> ConfigMap:
"""
Returns the full config map associated with the stack in the Workspace.
:returns: ConfigMap
"""
return self.workspace.get_all_config(self.name)
def set_config(self, key: str, value: ConfigValue) -> None:
"""
Sets a config key-value pair on the Stack in the associated Workspace.
:param key: The config key to add.
:param value: The config value to add.
"""
self.workspace.set_config(self.name, key, value)
def set_all_config(self, config: ConfigMap) -> None:
"""
Sets all specified config values on the stack in the associated Workspace.
:param config: A mapping of key to ConfigValue to set to config.
"""
self.workspace.set_all_config(self.name, config)
def remove_config(self, key: str) -> None:
"""
Removes the specified config key from the Stack in the associated Workspace.
:param key: The key to remove from config.
"""
self.workspace.remove_config(self.name, key)
def remove_all_config(self, keys: List[str]) -> None:
"""
Removes the specified config keys from the Stack in the associated Workspace.
:param keys: The keys to remove from config.
"""
self.workspace.remove_all_config(self.name, keys)
def refresh_config(self) -> None:
"""Gets and sets the config map used with the last update."""
self.workspace.refresh_config(self.name)
def outputs(self) -> OutputMap:
"""
Gets the current set of Stack outputs from the last Stack.up().
:returns: OutputMap
"""
return self.workspace.stack_outputs(self.name)
def history(self,
page_size: Optional[int] = None,
page: Optional[int] = None) -> List[UpdateSummary]:
"""
Returns a list summarizing all previous and current results from Stack lifecycle operations
(up/preview/refresh/destroy).
:param page_size: Paginate history entries (used in combination with page), defaults to all.
:param page: Paginate history entries (used in combination with page_size), defaults to all.
:returns: List[UpdateSummary]
"""
args = ["stack", "history", "--json", "--show-secrets"]
if page_size is not None:
# default page=1 when page_size is set
if page is None:
page = 1
args.extend(["--page-size", str(page_size), "--page", str(page)])
result = self._run_pulumi_cmd_sync(args)
summary_list = json.loads(result.stdout)
summaries: List[UpdateSummary] = []
for summary_json in summary_list:
summary = UpdateSummary(kind=summary_json["kind"],
start_time=datetime.strptime(summary_json["startTime"], _DATETIME_FORMAT),
message=summary_json["message"],
environment=summary_json["environment"],
config=summary_json["config"],
result=summary_json["result"],
end_time=datetime.strptime(summary_json["endTime"], _DATETIME_FORMAT),
version=summary_json["version"] if "version" in summary_json else None,
deployment=summary_json["Deployment"] if "Deployment" in summary_json else None,
resource_changes=summary_json["resourceChanges"] if "resourceChanges" in summary_json else None)
summaries.append(summary)
return summaries
def info(self) -> Optional[UpdateSummary]:
"""
Returns the current results from Stack lifecycle operations.
:returns: Optional[UpdateSummary]
"""
history = self.history(page_size=1)
if not history:
return None
return history[0]
def cancel(self) -> None:
"""
Cancel stops a stack's currently running update. It returns an error if no update is currently running.
Note that this operation is _very dangerous_, and may leave the stack in an inconsistent state
if a resource operation was pending when the update was canceled.
This command is not supported for local backends.
"""
self._run_pulumi_cmd_sync(["cancel", "--yes"])
def export_stack(self) -> Deployment:
"""
export_stack exports the deployment state of the stack.
This can be combined with Stack.import_state to edit a stack's state (such as recovery from failed deployments).
:returns: Deployment
"""
return self.workspace.export_stack(self.name)
def import_stack(self, state: Deployment) -> None:
"""
import_stack imports the specified deployment state into a pre-existing stack.
This can be combined with Stack.export_state to edit a stack's state (such as recovery from failed deployments).
:param state: The deployment state to import.
"""
return self.workspace.import_stack(self.name, state)
def _run_pulumi_cmd_sync(self,
args: List[str],
on_output: Optional[OnOutput] = None) -> CommandResult:
envs = {"PULUMI_DEBUG_COMMANDS": "true"}
if self.workspace.pulumi_home is not None:
envs = {**envs, "PULUMI_HOME": self.workspace.pulumi_home}
envs = {**envs, **self.workspace.env_vars}
additional_args = self.workspace.serialize_args_for_op(self.name)
args.extend(additional_args)
args.extend(["--stack", self.name])
result = _run_pulumi_cmd(args, self.workspace.work_dir, envs, on_output)
self.workspace.post_command_callback(self.name)
return result
def _parse_extra_args(**kwargs) -> List[str]:
extra_args: List[str] = []
message = kwargs.get("message")
expect_no_changes = kwargs.get("expect_no_changes")
diff = kwargs.get("diff")
replace = kwargs.get("replace")
target = kwargs.get("target")
target_dependents = kwargs.get("target_dependents")
parallel = kwargs.get("parallel")
if message:
extra_args.extend(["--message", message])
if expect_no_changes:
extra_args.append("--expect-no-changes")
if diff:
extra_args.append("--diff")
if replace:
for r in replace:
extra_args.extend(["--replace", r])
if target:
for t in target:
extra_args.extend(["--target", t])
if target_dependents:
extra_args.append("--target-dependents")
if parallel:
extra_args.extend(["--parallel", str(parallel)])
return extra_args
def fully_qualified_stack_name(org: str, project: str, stack: str) -> str:
"""
Returns a stack name formatted with the greatest possible specificity:
org/project/stack or user/project/stack
    Using this format avoids ambiguity in stack identity and guards against creating or selecting the wrong stack.
Note that filestate backends (local file, S3, Azure Blob) do not support stack names in this
format, and instead only use the stack name without an org/user or project to qualify it.
See: https://github.com/pulumi/pulumi/issues/2522
:param org: The name of the org or user.
:param project: The name of the project.
:param stack: The name of the stack.
:returns: The fully qualified stack name.
"""
return f"{org}/{project}/{stack}"
def _create_log_file(command: str) -> Tuple[str, tempfile.TemporaryDirectory]:
log_dir = tempfile.TemporaryDirectory(prefix=f"automation-logs-{command}-") # pylint: disable=consider-using-with
filepath = os.path.join(log_dir.name, "eventlog.txt")
# Open and close the file to ensure it exists before we start polling for logs
with open(filepath, "w+", encoding="utf-8"):
pass
return filepath, log_dir
def _watch_logs(filename: str, callback: OnEvent):
with open(filename, encoding="utf-8") as f:
while True:
line = f.readline()
# sleep if file hasn't been updated
if not line:
time.sleep(0.1)
continue
event = EngineEvent.from_json(json.loads(line))
callback(event)
# if this is the cancel event, stop watching logs.
if event.cancel_event:
break
def _cleanup(temp_dir: Optional[tempfile.TemporaryDirectory],
thread: Optional[threading.Thread],
on_exit_fn: Optional[Callable[[], None]] = None) -> None:
# If there's an on_exit function, execute it (used in preview/up to shut down server)
if on_exit_fn:
on_exit_fn()
# If we started a thread to watch logs, wait for it to terminate, timing out after 5 seconds.
if thread:
thread.join(5)
# If we created a temp_dir for the logs, clean up.
if temp_dir:
temp_dir.cleanup()
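# Illustrative usage sketch (the project/stack names and the empty program are
# assumptions, not part of this module): the lifecycle methods above are typically
# driven through the Automation API entry points such as create_or_select_stack.
def _example_stack_lifecycle() -> None:
    from pulumi import automation as auto

    def pulumi_program() -> None:
        # resources for the inline program would be declared here
        pass

    stack = auto.create_or_select_stack(stack_name="dev",
                                        project_name="example-project",
                                        program=pulumi_program)
    stack.preview(on_output=print)           # dry run, returns PreviewResult
    up_result = stack.up(on_output=print)    # deploy, returns UpResult
    print(up_result.summary.result, up_result.outputs)
    stack.refresh()                          # reconcile state with the cloud provider
    stack.destroy()                          # delete all resources in the stack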
|
the-stack_0_8670 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import os
from argparse import Namespace
import numpy as np
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
data_utils,
encoders,
indexed_dataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
)
from fairseq.tasks import FairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path, split,
src, src_dict,
tgt, tgt_dict,
combine, dataset_impl, upsample_primary,
left_pad_source, left_pad_target, max_source_positions,
max_target_positions, prepend_bos=False, load_alignments=False,
truncate_source=False, append_source_id=False,
num_buckets=0,
shuffle=True,
args=None
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
src_dataset = data_utils.load_indexed_dataset(prefix + src, src_dict, dataset_impl)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(prefix + tgt, tgt_dict, dataset_impl)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info('{} {} {}-{} {} examples'.format(
data_path, split_k, src, tgt, len(src_datasets[-1])
))
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(src)))
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(tgt)))
eos = tgt_dict.index('[{}]'.format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset, src_dataset.sizes, src_dict,
tgt_dataset, tgt_dataset_sizes, tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset, eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
split=split,
args=args
)
@register_task('translation')
class TranslationTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
# options for reporting BLEU during validation
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLEU scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])
if args.source_lang is None or args.target_lang is None:
raise Exception('Could not infer language pair, please provide it explicitly')
# load dictionaries
src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))
tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info('[{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
logger.info('[{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path, split, src, self.src_dict, tgt, self.tgt_dict,
combine=combine, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != 'test'),
)
def build_dataset_for_inference(self, src_tokens, src_lengths):
return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary)
def build_model(self, args):
model = super().build_model(args)
if getattr(args, 'eval_bleu', False):
assert getattr(args, 'eval_bleu_detok', None) is not None, (
'--eval-bleu-detok is required if using --eval-bleu; '
'try --eval-bleu-detok=moses (or --eval-bleu-detok=space '
'to disable detokenization, e.g., when using sentencepiece)'
)
detok_args = json.loads(getattr(args, 'eval_bleu_detok_args', '{}') or '{}')
self.tokenizer = encoders.build_tokenizer(Namespace(
tokenizer=getattr(args, 'eval_bleu_detok', None),
**detok_args
))
gen_args = json.loads(getattr(args, 'eval_bleu_args', '{}') or '{}')
self.sequence_generator = self.build_generator([model], Namespace(**gen_args))
return model
def valid_step(self, sample, model, criterion, **kwargs):
        # Check whether the dependency matrix (dependency_mat) needs to be fetched
special_input = model.get_special_input(sample)
loss, sample_size, logging_output = super().valid_step(sample, model, criterion, **special_input)
if self.args.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output['_bleu_sys_len'] = bleu.sys_len
logging_output['_bleu_ref_len'] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output['_bleu_counts_' + str(i)] = bleu.counts[i]
logging_output['_bleu_totals_' + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.args.eval_bleu:
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs('_bleu_counts_' + str(i)))
totals.append(sum_logs('_bleu_totals_' + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar('_bleu_counts', np.array(counts))
metrics.log_scalar('_bleu_totals', np.array(totals))
metrics.log_scalar('_bleu_sys_len', sum_logs('_bleu_sys_len'))
metrics.log_scalar('_bleu_ref_len', sum_logs('_bleu_ref_len'))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if 'smooth_method' in fn_sig:
smooth = {'smooth_method': 'exp'}
else:
smooth = {'smooth': 'exp'}
bleu = sacrebleu.compute_bleu(
correct=meters['_bleu_counts'].sum,
total=meters['_bleu_totals'].sum,
sys_len=meters['_bleu_sys_len'].sum,
ref_len=meters['_bleu_ref_len'].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived('bleu', compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.args.eval_bleu_remove_bpe,
unk_string=(
"UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"
),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]['tokens']))
refs.append(decode(
utils.strip_pad(sample['target'][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
))
if self.args.eval_bleu_print_samples:
logger.info('example hypothesis: ' + hyps[0])
logger.info('example reference: ' + refs[0])
if self.args.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize='none')
else:
return sacrebleu.corpus_bleu(hyps, [refs])
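# Illustrative invocation sketch (the data path and hyper-parameters are assumptions):
# the options registered in add_args() above surface as fairseq-train flags, e.g.
#
#   fairseq-train data-bin/wmt14_en_de \
#       --task translation --source-lang en --target-lang de \
#       --eval-bleu --eval-bleu-detok moses --eval-bleu-remove-bpe \
#       --eval-bleu-args '{"beam": 4, "lenpen": 0.6}' \
#       --eval-bleu-print-samples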
|
the-stack_0_8671 | #!/usr/bin/env python3
# You are probably well aware of the 'birthday paradox'
# https://en.wikipedia.org/wiki/Birthday_problem
# Let's try simulating it
# We will have a variable number of bins (can be months or days)
# And some number of trials for the simulation
# And some number of people who have random birthdays
# Use assert() to check parameters
# On the command line:
# python3 birthday.py <bins> <trials> <people>
import sys # allows us to use sys.argv (command-line arguments)
import random
assert(len(sys.argv) == 4) # 4 items: the script name plus 3 arguments
bins = int(sys.argv[1])
trials = int(sys.argv[2])
people = int(sys.argv[3])
assert(bins > 0)
assert(trials > 0)
assert(people >1)
collisions = 0
for t in range(trials):
    calendar = [] # create an empty calendar
same_day = False
for i in range(bins):
        calendar.append(0) # equivalent to initializing calendar = [0] * bins
for p in range(people): #insert people into calendar
r = random.randint(0, bins-1) #r represents their birthday
calendar[r] += 1
for day in calendar: #finds shared birthday
if day >1:
same_day = True
            break # faster: stop scanning as soon as a shared birthday is found
if same_day:
collisions += 1
print(collisions/trials)
"""
python3 birthday.py 365 1000 23
0.520
"""
|
the-stack_0_8672 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class GroupGreetingEventGreetingOwner(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
GroupGreetingEventGreetingOwner - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'str'
}
self.attribute_map = {
'id': 'id'
}
self._id = None
@property
def id(self):
"""
Gets the id of this GroupGreetingEventGreetingOwner.
:return: The id of this GroupGreetingEventGreetingOwner.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this GroupGreetingEventGreetingOwner.
:param id: The id of this GroupGreetingEventGreetingOwner.
:type: str
"""
self._id = id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
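# Minimal illustrative usage (the id value is an assumption):
#
#     owner = GroupGreetingEventGreetingOwner()
#     owner.id = "12345"
#     print(owner.to_json())  # -> {"id": "12345"}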
|
the-stack_0_8673 | from bs4 import BeautifulSoup
ENDPOINT = 'https://www2.correios.com.br/sistemas/rastreamento/ctrl/ctrlRastreamento.cfm'
def __make_request(session, tracking_id):
payload = {
'acao': 'track',
'objetos': tracking_id,
'btnPesq': 'Buscar'
}
return session.post(ENDPOINT, data=payload)
async def __make_soup(response):
if type(response) == str:
return BeautifulSoup(response, 'html.parser')
return BeautifulSoup(await response.text(), 'html.parser')
def __find_href(tag):
a = tag.find('a')
if a:
return a.get('href')
def __get_events(soup):
events = soup.select('td.sroLbEvent')
for i, event in enumerate(events):
events[i] = {
'event': event.strong.string,
'link': __find_href(event)
}
return events
def __get_info(soup):
infos = soup.select('td.sroDtEvent')
for i, info in enumerate(infos):
info = list(info.stripped_strings)
infos[i] = {
'date': info[0],
'hour': info[1],
'local': __fix_local(info[2])
}
return infos
def __fix_local(local):
return local.replace('\xa0/\xa0', ' / ')
def _get_events(html):
soup = BeautifulSoup(html, 'lxml')
events = __get_events(soup)
infos = __get_info(soup)
full_events = []
for event, info in zip(events, infos):
full_events.append({**event, **info})
return full_events
async def track_package(session, tracking_id):
async with __make_request(session, tracking_id) as r:
html = await r.text()
if 'Aguardando postagem pelo remetente.' in html:
return
else:
return _get_events(html)
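# Illustrative usage sketch (the tracking code below is an assumption): track_package
# expects an aiohttp-style session whose post() returns an async context manager.
if __name__ == "__main__":
    import asyncio
    import aiohttp

    async def _demo():
        async with aiohttp.ClientSession() as session:
            print(await track_package(session, "QB123456789BR"))

    asyncio.run(_demo())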
|
the-stack_0_8680 | #!/usr/bin/python3
# coding=utf-8
"""
:Copyright: © 2022 Advanced Control Systems, Inc. All Rights Reserved.
@Author: Stephen Hung
@Author: Darren Liang
@Date : 2022-02-18
"""
import os
import sys
sys.path.append("..")
from adms_api.core.OracleInterface import OracleInterface
# from acsprism import RtdbAddress, RtdbPoint, rtdb_init
# from core.LinuxInterface import LinuxInterface
# APP INFO
TITLE = "ADMS API"
IPADDR = "127.0.0.1"
PORT = "5000"
# PRISM INFO
# PRISM = LinuxInterface()
# DB INFO
def connect_database():
USER = os.getenv('ORACLE_USER', 'acs_das')
PSWD = os.getenv('ORACLE_PW' , 'acs_das')
TNS = os.getenv('ORACLE_DBSTRING', 'ems')
DASdb = OracleInterface(USER, PSWD, TNS)
#DASdb.ConnectTest()
return DASdb
# LOG INFO
LOG_FILENAME = 'ADMS_API.log'
LOG_FORMAT = '%(asctime)s [%(process)d] %(levelname)s %(name)s: %(message)s'
LOG_FOLDER = '/home/acs/tmp'
if __name__ == "__main__":
USER = ""
PSWD = ""
TNS = ""
DASdb = OracleInterface(USER, PSWD, TNS)
DASdb.ConnectTest() |
the-stack_0_8684 | from datasource.data_orchestrator import DataOrchestrator
from datasource.factors.factors_processor import FactorsProcessor
from logic.embeddings.spacy_embedder import SpacyEmbedder
from logic.reduction.umap_reducer import UmapReducer
from logic.clustering.hdbscan_clusterer import HDBScanClusterer
from logic.ml_model_dao.ml_model_docker_volume_dao import MLModelDockerVolumeDAO
from elastic.elastic_indices import get_factor_recommendation_index_id
class FactorsOrchestrator(DataOrchestrator):
UMAP_REDUCER_MODEL_NAME = 'umap-reducer'
HDBSCAN_CLUSTERER_MODEL_NAME = 'hdbscan-clusterer'
def __init__(self, data_source, es_host, kb_index, use_saved_reducer=False, use_saved_clusterer=False):
self.data_source = data_source
self.load_reducer = use_saved_reducer
self.load_clusterer = use_saved_clusterer
self.ml_model_dao = MLModelDockerVolumeDAO(es_host, kb_index)
self.curation_index_id = get_factor_recommendation_index_id(kb_index)
def orchestrate(self):
if self.load_reducer:
reducer = self.load_model(self.UMAP_REDUCER_MODEL_NAME)
else:
reducer = UmapReducer(300, 2, 0.01, 15)
if self.load_clusterer:
clusterer = self.load_model(self.HDBSCAN_CLUSTERER_MODEL_NAME)
else:
clusterer = HDBScanClusterer(2, 15, 8, 0.01)
embedder = SpacyEmbedder(normalize=True)
processor = FactorsProcessor(self.data_source, reducer, clusterer, embedder)
data = processor.process()
if not self.load_reducer:
self.save_model(reducer, self.UMAP_REDUCER_MODEL_NAME)
if not self.load_clusterer:
self.save_model(clusterer, self.HDBSCAN_CLUSTERER_MODEL_NAME)
return data
def load_model(self, name):
return self.ml_model_dao.load(name, self.curation_index_id)
def save_model(self, data, model_name):
self.ml_model_dao.save(data, model_name, self.curation_index_id)
|
the-stack_0_8685 | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def model(mnist, epoches=1000, batch_size=100, learning_rate=0.003):
print("Start model")
with tf.name_scope('X'):
X = tf.placeholder(tf.float32, [None, 784], name='X')
x_image = tf.reshape(X, [-1, 28, 28, 1])
with tf.name_scope('weights'):
W = tf.Variable(tf.zeros([784, 10]), name='weights')
with tf.name_scope('biases'):
b = tf.Variable(tf.zeros([10]), name='biases')
with tf.name_scope('Wx_plus_b'):
# Модель Y = X.W + b
Y = tf.nn.softmax(tf.matmul(tf.reshape(X, [-1, 784]), W) + b, name='labels')
# Подстановка для корректных значений входных данных
with tf.name_scope('Y_'):
Y_ = tf.placeholder(tf.float32, [None, 10])
with tf.name_scope('xentropy'):
# Функция потерь H = Sum(Y_ * log(Y))
cross_entropy = -tf.reduce_sum(Y_ * tf.log(Y))
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
# Доля верных ответов найденных в наборе
is_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))
with tf.name_scope('xentropy_mean'):
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
with tf.name_scope('train'):
        # Optimize the loss function with gradient descent
        # 0.003 is the gradient step (learning rate), a hyperparameter
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        # Minimize the loss
train_step = optimizer.minimize(cross_entropy)
tf.summary.image('input', x_image, 10)
tf.summary.histogram('weights', W)
tf.summary.histogram('biases', b)
tf.summary.scalar('accuracy', accuracy)
tf.summary.scalar('cross_entropy', cross_entropy)
with tf.Session() as sess:
        merged = tf.summary.merge_all() # Merge all the summaries so they can be written to the log directory
writer = tf.summary.FileWriter("/tmp/tensorflow/one_layer_nn", sess.graph)
tf.global_variables_initializer().run()
for i in range(epoches):
            # load a batch of images and their class labels
batch_X, batch_Y = mnist.train.next_batch(batch_size)
train_data={X: batch_X, Y_: batch_Y}
# train
sess.run(train_step, feed_dict=train_data)
if i % 10 == 0:
test_data={X: mnist.test.images, Y_: mnist.test.labels}
summary, a = sess.run([merged, accuracy], feed_dict=test_data)
writer.add_summary(summary, i)
if i % 200 == 0:
print("Test: {}".format(a))
writer.close()
def main():
print("MNIST single layer NN")
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=True)
tf.set_random_seed(0)
tf.reset_default_graph()
model(mnist, epoches=10000)
if __name__ == '__main__':
main()
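# The summaries written above can be inspected with TensorBoard
# (the path matches the FileWriter used in model()):
#   tensorboard --logdir /tmp/tensorflow/one_layer_nn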
|
the-stack_0_8686 | """
NCL_station_3.py
================
This script illustrates the following concepts:
- Drawing station numbers on a map, and removing ones that overlap
- Attaching lots of text strings to a map
- Using Cartopy's GeoAxes.gridlines as a workaround to adding tick labels on Axes with Mercator (or another) map projection
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/station_3.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/station_3_1_lg.png and https://www.ncl.ucar.edu/Applications/Images/station_3_2_lg.png
"""
###################################################
# Import packages:
import numpy as np
import pandas as pd
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from matplotlib import pyplot as plt
import matplotlib.ticker as mticker
import geocat.datafiles as gdf
###################################################
# Read in data:
# Open a ascii data file using pandas read_csv and assigning column names
ds = pd.read_csv(
gdf.get('ascii_files/istasyontablosu_son.txt'),
delimiter='\\s+',
names=['index', 'station', 'year1', 'year2', 'number', 'lat', 'lon'])
# Get number of stations
npts = len(ds)
# Extract variables
no = ds.index + 1 # +1 because Pandas' RangeIndex defaults start with 0
lat = ds.lat
lon = ds.lon
##############################################################################
# Helper function to add plot elements to the axes
def create_axes(maintitle):
# Generate figure (set its size (width, height) in inches)
fig = plt.figure(figsize=(12, 6.5))
# Generate axes
ax = plt.axes(projection=ccrs.Mercator())
# Set extent to show particular area of the map
ax.set_extent([25.5, 45.2, 35.5, 42.5], ccrs.PlateCarree())
    # Add land feature (state/country outlines drawn in gray)
ax.add_feature(cfeature.LAND, facecolor='none', edgecolor='gray')
# Draw gridlines
gl = ax.gridlines(crs=ccrs.PlateCarree(),
draw_labels=True,
dms=False,
x_inline=False,
y_inline=False,
linewidth=1,
color="gray",
alpha=0.25)
# Set frequency of gridlines in the x and y directions
gl.xlocator = mticker.FixedLocator(np.arange(26, 45, 2))
gl.ylocator = mticker.FixedLocator(np.arange(36, 43, 1))
    # Turn off top/right labels
gl.top_labels = False
gl.right_labels = False
# Set label sizes
gl.xlabel_style = {"rotation": 0, "size": 14}
gl.ylabel_style = {"rotation": 0, "size": 14}
# Manually turn off ticks on top and right spines
ax.tick_params(axis='x', top=False)
ax.tick_params(axis='y', right=False)
# Add title
ax.set_title(maintitle, fontweight='bold', fontsize=18, y=1.03)
return fig, ax
##############################################################################
# Plot with texts overlapping
fig, ax = create_axes('Overlapping text strings')
# Add all station number texts
for i in range(npts):
ax.text(lon[i],
lat[i],
no[i],
fontsize=8,
fontweight='bold',
va='center',
ha='center',
transform=ccrs.PlateCarree())
# Show the plot
plt.tight_layout()
plt.show()
##############################################################################
# Plot without texts overlapping
fig, ax = create_axes('Overlapping text strings removed')
# Transpose the array of longitude and latitude for easier access of the location of each station point
location = np.transpose(np.array([lon, lat]))
# Create an array of booleans denoting if station would be removed
remove = np.full(npts, False)
# Currently minimum distance is calculated through finding distance between two suitable stations
# In the future we would like to find mindist by finding the width and height of texts in pixel coordinates
mindist = np.sqrt(np.sum(np.square(location[123] - location[124])))
# Tag station to be removed using array `remove`
# Loop through every pair of stations and calculate distances between them
for i in range(npts):
for j in range(npts):
# Calculate euclidean distance with numpy functions
dist = np.sqrt(np.sum(np.square(location[j] - location[i])))
if dist <= mindist and i != j and not remove[j]:
# Tag one of the stations to be removed if distance between them allows for overlap,
# they are different stations, and if the other station will not be removed
remove[i] = True
# Add text if it is not tagged to be removed
for i in range(npts):
if not remove[i]:
ax.text(lon[i],
lat[i],
no[i],
fontsize=8,
fontweight='bold',
va='center',
ha='center',
transform=ccrs.PlateCarree())
# Show the plot
plt.tight_layout()
plt.show()
|
the-stack_0_8687 | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import re
import sys
from imp import load_source
from os.path import basename, dirname, isdir, isfile, join
import click
import semantic_version
from platformio import __version__, app, exception, util
from platformio.compat import PY2, hashlib_encode_data, is_bytes
from platformio.managers.core import get_core_package_dir
from platformio.managers.package import BasePkgManager, PackageManager
from platformio.proc import (BuildAsyncPipe, copy_pythonpath_to_osenv,
exec_command, get_pythonexe_path)
from platformio.project.config import ProjectConfig
from platformio.project.helpers import (get_project_boards_dir,
get_project_core_dir,
get_project_packages_dir,
get_project_platforms_dir)
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
class PlatformManager(BasePkgManager):
def __init__(self, package_dir=None, repositories=None):
if not repositories:
repositories = [
"https://dl.bintray.com/platformio/dl-platforms/manifest.json",
"{0}://dl.platformio.org/platforms/manifest.json".format(
"https" if app.get_setting("enable_ssl") else "http")
]
BasePkgManager.__init__(self, package_dir
or get_project_platforms_dir(), repositories)
@property
def manifest_names(self):
return ["platform.json"]
def get_manifest_path(self, pkg_dir):
if not isdir(pkg_dir):
return None
for name in self.manifest_names:
manifest_path = join(pkg_dir, name)
if isfile(manifest_path):
return manifest_path
return None
def install(self,
name,
requirements=None,
with_packages=None,
without_packages=None,
skip_default_package=False,
after_update=False,
silent=False,
force=False,
**_): # pylint: disable=too-many-arguments, arguments-differ
platform_dir = BasePkgManager.install(self,
name,
requirements,
silent=silent,
force=force)
p = PlatformFactory.newPlatform(platform_dir)
# don't cleanup packages or install them after update
# we check packages for updates in def update()
if after_update:
return True
p.install_packages(with_packages,
without_packages,
skip_default_package,
silent=silent,
force=force)
return self.cleanup_packages(list(p.packages))
def uninstall(self, package, requirements=None, after_update=False):
if isdir(package):
pkg_dir = package
else:
name, requirements, url = self.parse_pkg_uri(package, requirements)
pkg_dir = self.get_package_dir(name, requirements, url)
if not pkg_dir:
raise exception.UnknownPlatform(package)
p = PlatformFactory.newPlatform(pkg_dir)
BasePkgManager.uninstall(self, pkg_dir, requirements)
# don't cleanup packages or install them after update
# we check packages for updates in def update()
if after_update:
return True
return self.cleanup_packages(list(p.packages))
def update( # pylint: disable=arguments-differ
self,
package,
requirements=None,
only_check=False,
only_packages=False):
if isdir(package):
pkg_dir = package
else:
name, requirements, url = self.parse_pkg_uri(package, requirements)
pkg_dir = self.get_package_dir(name, requirements, url)
if not pkg_dir:
raise exception.UnknownPlatform(package)
p = PlatformFactory.newPlatform(pkg_dir)
pkgs_before = list(p.get_installed_packages())
missed_pkgs = set()
if not only_packages:
BasePkgManager.update(self, pkg_dir, requirements, only_check)
p = PlatformFactory.newPlatform(pkg_dir)
missed_pkgs = set(pkgs_before) & set(p.packages)
missed_pkgs -= set(p.get_installed_packages())
p.update_packages(only_check)
self.cleanup_packages(list(p.packages))
if missed_pkgs:
p.install_packages(with_packages=list(missed_pkgs),
skip_default_package=True)
return True
def cleanup_packages(self, names):
self.cache_reset()
deppkgs = {}
for manifest in PlatformManager().get_installed():
p = PlatformFactory.newPlatform(manifest['__pkg_dir'])
for pkgname, pkgmanifest in p.get_installed_packages().items():
if pkgname not in deppkgs:
deppkgs[pkgname] = set()
deppkgs[pkgname].add(pkgmanifest['version'])
pm = PackageManager(get_project_packages_dir())
for manifest in pm.get_installed():
if manifest['name'] not in names:
continue
if (manifest['name'] not in deppkgs
or manifest['version'] not in deppkgs[manifest['name']]):
try:
pm.uninstall(manifest['__pkg_dir'], after_update=True)
except exception.UnknownPackage:
pass
self.cache_reset()
return True
@util.memoized(expire="5s")
def get_installed_boards(self):
boards = []
for manifest in self.get_installed():
p = PlatformFactory.newPlatform(manifest['__pkg_dir'])
for config in p.get_boards().values():
board = config.get_brief_data()
if board not in boards:
boards.append(board)
return boards
@staticmethod
def get_registered_boards():
return util.get_api_result("/boards", cache_valid="7d")
def get_all_boards(self):
boards = self.get_installed_boards()
know_boards = ["%s:%s" % (b['platform'], b['id']) for b in boards]
try:
for board in self.get_registered_boards():
key = "%s:%s" % (board['platform'], board['id'])
if key not in know_boards:
boards.append(board)
except (exception.APIRequestError, exception.InternetIsOffline):
pass
return sorted(boards, key=lambda b: b['name'])
def board_config(self, id_, platform=None):
for manifest in self.get_installed_boards():
if manifest['id'] == id_ and (not platform
or manifest['platform'] == platform):
return manifest
for manifest in self.get_registered_boards():
if manifest['id'] == id_ and (not platform
or manifest['platform'] == platform):
return manifest
raise exception.UnknownBoard(id_)
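# Illustrative sketch (the platform and board names are assumptions): PlatformManager and
# PlatformFactory are typically combined like this to inspect an installed platform.
# Note that calling it performs real package/network operations.
def _example_platform_lookup():
    pm = PlatformManager()
    pm.install("atmelavr")                        # fetch the dev-platform package if missing
    platform = PlatformFactory.newPlatform("atmelavr")
    board = platform.board_config("uno")          # PlatformBoardConfig instance
    return board.get_brief_data()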
class PlatformFactory(object):
@staticmethod
def get_clsname(name):
name = re.sub(r"[^\da-z\_]+", "", name, flags=re.I)
return "%s%sPlatform" % (name.upper()[0], name.lower()[1:])
@staticmethod
def load_module(name, path):
module = None
try:
module = load_source("platformio.managers.platform.%s" % name,
path)
except ImportError:
raise exception.UnknownPlatform(name)
return module
@classmethod
def newPlatform(cls, name, requirements=None):
pm = PlatformManager()
platform_dir = None
if isdir(name):
platform_dir = name
name = pm.load_manifest(platform_dir)['name']
elif name.endswith("platform.json") and isfile(name):
platform_dir = dirname(name)
name = util.load_json(name)['name']
else:
name, requirements, url = pm.parse_pkg_uri(name, requirements)
platform_dir = pm.get_package_dir(name, requirements, url)
if platform_dir:
name = pm.load_manifest(platform_dir)['name']
if not platform_dir:
raise exception.UnknownPlatform(
name if not requirements else "%s@%s" % (name, requirements))
platform_cls = None
if isfile(join(platform_dir, "platform.py")):
platform_cls = getattr(
cls.load_module(name, join(platform_dir, "platform.py")),
cls.get_clsname(name))
else:
platform_cls = type(str(cls.get_clsname(name)), (PlatformBase, ),
{})
_instance = platform_cls(join(platform_dir, "platform.json"))
assert isinstance(_instance, PlatformBase)
return _instance
class PlatformPackagesMixin(object):
def install_packages( # pylint: disable=too-many-arguments
self,
with_packages=None,
without_packages=None,
skip_default_package=False,
silent=False,
force=False):
with_packages = set(self.find_pkg_names(with_packages or []))
without_packages = set(self.find_pkg_names(without_packages or []))
upkgs = with_packages | without_packages
ppkgs = set(self.packages)
if not upkgs.issubset(ppkgs):
raise exception.UnknownPackage(", ".join(upkgs - ppkgs))
for name, opts in self.packages.items():
version = opts.get("version", "")
if name in without_packages:
continue
elif (name in with_packages or
not (skip_default_package or opts.get("optional", False))):
if ":" in version:
self.pm.install("%s=%s" % (name, version),
silent=silent,
force=force)
else:
self.pm.install(name, version, silent=silent, force=force)
return True
def find_pkg_names(self, candidates):
result = []
for candidate in candidates:
found = False
# lookup by package types
for _name, _opts in self.packages.items():
if _opts.get("type") == candidate:
result.append(_name)
found = True
if (self.frameworks and candidate.startswith("framework-")
and candidate[10:] in self.frameworks):
result.append(self.frameworks[candidate[10:]]['package'])
found = True
if not found:
result.append(candidate)
return result
def update_packages(self, only_check=False):
for name, manifest in self.get_installed_packages().items():
requirements = self.packages[name].get("version", "")
if ":" in requirements:
_, requirements, __ = self.pm.parse_pkg_uri(requirements)
self.pm.update(manifest['__pkg_dir'], requirements, only_check)
def get_installed_packages(self):
items = {}
for name in self.packages:
pkg_dir = self.get_package_dir(name)
if pkg_dir:
items[name] = self.pm.load_manifest(pkg_dir)
return items
def are_outdated_packages(self):
for name, manifest in self.get_installed_packages().items():
requirements = self.packages[name].get("version", "")
if ":" in requirements:
_, requirements, __ = self.pm.parse_pkg_uri(requirements)
if self.pm.outdated(manifest['__pkg_dir'], requirements):
return True
return False
def get_package_dir(self, name):
version = self.packages[name].get("version", "")
if ":" in version:
return self.pm.get_package_dir(
*self.pm.parse_pkg_uri("%s=%s" % (name, version)))
return self.pm.get_package_dir(name, version)
def get_package_version(self, name):
pkg_dir = self.get_package_dir(name)
if not pkg_dir:
return None
return self.pm.load_manifest(pkg_dir).get("version")
class PlatformRunMixin(object):
LINE_ERROR_RE = re.compile(r"(^|\s+)error:?\s+", re.I)
@staticmethod
def encode_scons_arg(value):
data = base64.urlsafe_b64encode(hashlib_encode_data(value))
return data.decode() if is_bytes(data) else data
@staticmethod
def decode_scons_arg(data):
value = base64.urlsafe_b64decode(data)
return value.decode() if is_bytes(value) else value
def run( # pylint: disable=too-many-arguments
self, variables, targets, silent, verbose, jobs):
assert isinstance(variables, dict)
assert isinstance(targets, list)
config = ProjectConfig.get_instance(variables['project_config'])
options = config.items(env=variables['pioenv'], as_dict=True)
if "framework" in options:
# support PIO Core 3.0 dev/platforms
options['pioframework'] = options['framework']
self.configure_default_packages(options, targets)
self.install_packages(silent=True)
self.silent = silent
self.verbose = verbose or app.get_setting("force_verbose")
if "clean" in targets:
targets = ["-c", "."]
variables['platform_manifest'] = self.manifest_path
if "build_script" not in variables:
variables['build_script'] = self.get_build_script()
if not isfile(variables['build_script']):
raise exception.BuildScriptNotFound(variables['build_script'])
result = self._run_scons(variables, targets, jobs)
assert "returncode" in result
return result
def _run_scons(self, variables, targets, jobs):
args = [
get_pythonexe_path(),
join(get_core_package_dir("tool-scons"), "script", "scons"),
"-Q", "--warn=no-no-parallel-support",
"--jobs", str(jobs),
"--sconstruct", join(util.get_source_dir(), "builder", "main.py")
] # yapf: disable
args.append("PIOVERBOSE=%d" % (1 if self.verbose else 0))
# pylint: disable=protected-access
args.append("ISATTY=%d" %
(1 if click._compat.isatty(sys.stdout) else 0))
args += targets
# encode and append variables
for key, value in variables.items():
args.append("%s=%s" % (key.upper(), self.encode_scons_arg(value)))
def _write_and_flush(stream, data):
try:
stream.write(data)
stream.flush()
except IOError:
pass
copy_pythonpath_to_osenv()
result = exec_command(
args,
stdout=BuildAsyncPipe(
line_callback=self._on_stdout_line,
data_callback=lambda data: _write_and_flush(sys.stdout, data)),
stderr=BuildAsyncPipe(
line_callback=self._on_stderr_line,
data_callback=lambda data: _write_and_flush(sys.stderr, data)))
return result
def _on_stdout_line(self, line):
if "`buildprog' is up to date." in line:
return
self._echo_line(line, level=1)
def _on_stderr_line(self, line):
is_error = self.LINE_ERROR_RE.search(line) is not None
self._echo_line(line, level=3 if is_error else 2)
a_pos = line.find("fatal error:")
b_pos = line.rfind(": No such file or directory")
if a_pos == -1 or b_pos == -1:
return
self._echo_missed_dependency(line[a_pos + 12:b_pos].strip())
def _echo_line(self, line, level):
if line.startswith("scons: "):
line = line[7:]
assert 1 <= level <= 3
if self.silent and (level < 2 or not line):
return
fg = (None, "yellow", "red")[level - 1]
if level == 1 and "is up to date" in line:
fg = "green"
click.secho(line, fg=fg, err=level > 1, nl=False)
@staticmethod
def _echo_missed_dependency(filename):
if "/" in filename or not filename.endswith((".h", ".hpp")):
return
banner = """
{dots}
* Looking for {filename_styled} dependency? Check our library registry!
*
* CLI > platformio lib search "header:{filename}"
* Web > {link}
*
{dots}
""".format(filename=filename,
filename_styled=click.style(filename, fg="cyan"),
link=click.style(
"https://platformio.org/lib/search?query=header:%s" %
quote(filename, safe=""),
fg="blue"),
dots="*" * (56 + len(filename)))
click.echo(banner, err=True)
class PlatformBase( # pylint: disable=too-many-public-methods
PlatformPackagesMixin, PlatformRunMixin):
PIO_VERSION = semantic_version.Version(util.pepver_to_semver(__version__))
_BOARDS_CACHE = {}
def __init__(self, manifest_path):
self.manifest_path = manifest_path
self.silent = False
self.verbose = False
self._BOARDS_CACHE = {}
self._manifest = util.load_json(manifest_path)
self._custom_packages = None
self.pm = PackageManager(get_project_packages_dir(),
self.package_repositories)
# if self.engines and "platformio" in self.engines:
# if self.PIO_VERSION not in semantic_version.Spec(
# self.engines['platformio']):
# raise exception.IncompatiblePlatform(self.name,
# str(self.PIO_VERSION))
@property
def name(self):
return self._manifest['name']
@property
def title(self):
return self._manifest['title']
@property
def description(self):
return self._manifest['description']
@property
def version(self):
return self._manifest['version']
@property
def homepage(self):
return self._manifest.get("homepage")
@property
def vendor_url(self):
return self._manifest.get("url")
@property
def docs_url(self):
return self._manifest.get("docs")
@property
def repository_url(self):
return self._manifest.get("repository", {}).get("url")
@property
def license(self):
return self._manifest.get("license")
@property
def frameworks(self):
return self._manifest.get("frameworks")
@property
def engines(self):
return self._manifest.get("engines")
@property
def package_repositories(self):
return self._manifest.get("packageRepositories")
@property
def manifest(self):
return self._manifest
@property
def packages(self):
packages = self._manifest.get("packages", {})
for item in (self._custom_packages or []):
name = item
version = "*"
if "@" in item:
name, version = item.split("@", 2)
name = name.strip()
if name not in packages:
packages[name] = {}
packages[name].update({
"version": version.strip(),
"optional": False
})
return packages
def get_dir(self):
return dirname(self.manifest_path)
def get_build_script(self):
main_script = join(self.get_dir(), "builder", "main.py")
if isfile(main_script):
return main_script
raise NotImplementedError()
def is_embedded(self):
for opts in self.packages.values():
if opts.get("type") == "uploader":
return True
return False
def get_boards(self, id_=None):
def _append_board(board_id, manifest_path):
config = PlatformBoardConfig(manifest_path)
if "platform" in config and config.get("platform") != self.name:
return
if "platforms" in config \
and self.name not in config.get("platforms"):
return
config.manifest['platform'] = self.name
self._BOARDS_CACHE[board_id] = config
bdirs = [
get_project_boards_dir(),
join(get_project_core_dir(), "boards"),
join(self.get_dir(), "boards"),
]
if id_ is None:
for boards_dir in bdirs:
if not isdir(boards_dir):
continue
for item in sorted(os.listdir(boards_dir)):
_id = item[:-5]
if not item.endswith(".json") or _id in self._BOARDS_CACHE:
continue
_append_board(_id, join(boards_dir, item))
else:
if id_ not in self._BOARDS_CACHE:
for boards_dir in bdirs:
if not isdir(boards_dir):
continue
manifest_path = join(boards_dir, "%s.json" % id_)
if isfile(manifest_path):
_append_board(id_, manifest_path)
break
if id_ not in self._BOARDS_CACHE:
raise exception.UnknownBoard(id_)
return self._BOARDS_CACHE[id_] if id_ else self._BOARDS_CACHE
def board_config(self, id_):
return self.get_boards(id_)
def get_package_type(self, name):
return self.packages[name].get("type")
def configure_default_packages(self, options, targets):
# override user custom packages
self._custom_packages = options.get("platform_packages")
# enable used frameworks
for framework in options.get("framework", []):
if not self.frameworks:
continue
framework = framework.lower().strip()
if not framework or framework not in self.frameworks:
continue
_pkg_name = self.frameworks[framework].get("package")
if _pkg_name:
self.packages[_pkg_name]['optional'] = False
# enable upload tools for upload targets
if any(["upload" in t for t in targets] + ["program" in targets]):
for name, opts in self.packages.items():
if opts.get("type") == "uploader":
self.packages[name]['optional'] = False
# skip all packages in "nobuild" mode
# allow only upload tools and frameworks
elif "nobuild" in targets and opts.get("type") != "framework":
self.packages[name]['optional'] = True
def get_lib_storages(self):
storages = []
for opts in (self.frameworks or {}).values():
if "package" not in opts:
continue
pkg_dir = self.get_package_dir(opts['package'])
if not pkg_dir or not isdir(join(pkg_dir, "libraries")):
continue
libs_dir = join(pkg_dir, "libraries")
storages.append({"name": opts['package'], "path": libs_dir})
libcores_dir = join(libs_dir, "__cores__")
if not isdir(libcores_dir):
continue
for item in os.listdir(libcores_dir):
libcore_dir = join(libcores_dir, item)
if not isdir(libcore_dir):
continue
storages.append({
"name": "%s-core-%s" % (opts['package'], item),
"path": libcore_dir
})
return storages
class PlatformBoardConfig(object):
def __init__(self, manifest_path):
self._id = basename(manifest_path)[:-5]
assert isfile(manifest_path)
self.manifest_path = manifest_path
try:
self._manifest = util.load_json(manifest_path)
except ValueError:
raise exception.InvalidBoardManifest(manifest_path)
if not set(["name", "url", "vendor"]) <= set(self._manifest):
raise exception.PlatformioException(
"Please specify name, url and vendor fields for " +
manifest_path)
def get(self, path, default=None):
try:
value = self._manifest
for k in path.split("."):
value = value[k]
# pylint: disable=undefined-variable
if PY2 and isinstance(value, unicode):
# cast to plain string from unicode for PY2, resolves issue in
# dev/platform when BoardConfig.get() is used in pair with
# os.path.join(file_encoding, unicode_encoding)
try:
value = value.encode("utf-8")
except UnicodeEncodeError:
pass
return value
except KeyError:
if default is not None:
return default
raise KeyError("Invalid board option '%s'" % path)
def update(self, path, value):
newdict = None
for key in path.split(".")[::-1]:
if newdict is None:
newdict = {key: value}
else:
newdict = {key: newdict}
util.merge_dicts(self._manifest, newdict)
def __contains__(self, key):
try:
self.get(key)
return True
except KeyError:
return False
@property
def id(self):
return self._id
@property
def id_(self):
return self.id
@property
def manifest(self):
return self._manifest
def get_brief_data(self):
return {
"id":
self.id,
"name":
self._manifest['name'],
"platform":
self._manifest.get("platform"),
"mcu":
self._manifest.get("build", {}).get("mcu", "").upper(),
"fcpu":
int("".join([
c for c in str(
self._manifest.get("build", {}).get("f_cpu", "0L"))
if c.isdigit()
])),
"ram":
self._manifest.get("upload", {}).get("maximum_ram_size", 0),
"rom":
self._manifest.get("upload", {}).get("maximum_size", 0),
"connectivity":
self._manifest.get("connectivity"),
"frameworks":
self._manifest.get("frameworks"),
"debug":
self.get_debug_data(),
"vendor":
self._manifest['vendor'],
"url":
self._manifest['url']
}
def get_debug_data(self):
if not self._manifest.get("debug", {}).get("tools"):
return None
tools = {}
for name, options in self._manifest['debug']['tools'].items():
tools[name] = {}
for key, value in options.items():
if key in ("default", "onboard"):
tools[name][key] = value
return {"tools": tools}
def get_debug_tool_name(self, custom=None):
debug_tools = self._manifest.get("debug", {}).get("tools")
tool_name = custom
if tool_name == "custom":
return tool_name
if not debug_tools:
raise exception.DebugSupportError(self._manifest['name'])
if tool_name:
if tool_name in debug_tools:
return tool_name
raise exception.DebugInvalidOptions(
"Unknown debug tool `%s`. Please use one of `%s` or `custom`" %
(tool_name, ", ".join(sorted(list(debug_tools)))))
# automatically select best tool
data = {"default": [], "onboard": [], "external": []}
for key, value in debug_tools.items():
if value.get("default"):
data['default'].append(key)
elif value.get("onboard"):
data['onboard'].append(key)
data['external'].append(key)
for key, value in data.items():
if not value:
continue
return sorted(value)[0]
assert any(item for item in data)
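# Illustrative sketch (not part of the original sources): the dotted-path lookup that
# PlatformBoardConfig.get() and update() implement above can be exercised in isolation.
# The manifest below is a made-up example, not a real board definition.
if __name__ == "__main__":
    demo_manifest = {"build": {"mcu": "atmega328p", "f_cpu": "16000000L"}, "name": "Demo Board"}

    def _dotted_get(data, path, default=None):
        value = data
        for key in path.split("."):
            if not isinstance(value, dict) or key not in value:
                return default
            value = value[key]
        return value

    assert _dotted_get(demo_manifest, "build.mcu") == "atmega328p"
    assert _dotted_get(demo_manifest, "upload.speed", default=115200) == 115200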
|
the-stack_0_8688 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for form validation."""
import json
import unittest
from werkzeug import MultiDict
import webcompat
from webcompat import form
FIREFOX_UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:48.0) Gecko/20100101 Firefox/48.0' # nopep8
class TestForm(unittest.TestCase):
"""Module for testing the form."""
def setUp(self):
"""Set up."""
self.maxDiff = None
webcompat.app.config['TESTING'] = True
self.app = webcompat.app.test_client()
def tearDown(self):
"""Tear down."""
pass
def test_normalize_url(self):
"""Check that URL is normalized."""
r = form.normalize_url('http://example.com')
self.assertEqual(r, 'http://example.com')
r = form.normalize_url(u'愛')
self.assertEqual(r, u'http://愛')
r = form.normalize_url(u'http://愛')
self.assertEqual(r, u'http://愛')
r = form.normalize_url('https://example.com')
self.assertEqual(r, 'https://example.com')
r = form.normalize_url('example.com')
self.assertEqual(r, 'http://example.com')
r = form.normalize_url('http:/example.com')
self.assertEqual(r, 'http://example.com')
r = form.normalize_url('https:/example.com')
self.assertEqual(r, 'https://example.com')
r = form.normalize_url('http:example.com')
self.assertEqual(r, 'http://example.com')
r = form.normalize_url('https:example.com')
self.assertEqual(r, 'https://example.com')
r = form.normalize_url('//example.com')
self.assertEqual(r, 'http://example.com')
r = form.normalize_url('http://https://bad.example.com')
self.assertEqual(r, 'https://bad.example.com')
r = form.normalize_url('http://param.example.com/?q=foo#bar')
self.assertEqual(r, 'http://param.example.com/?q=foo#bar')
r = form.normalize_url('')
self.assertIsNone(r)
def test_domain_name(self):
"""Check that domain name is extracted."""
r = form.domain_name('http://example.com')
self.assertEqual(r, 'example.com')
r = form.domain_name('https://example.com')
self.assertEqual(r, 'example.com')
r = form.normalize_url('')
self.assertIsNone(r)
def test_metadata_wrapping(self):
"""Check that metadata is processed and wrapped."""
TEST_DICT = {'cool': 'dude', 'wow': 'ok'}
EXPECTED_SINGLE = '<!-- @cool: dude -->\n'
EXPECTED_SINGLE_COMMA = '<!-- @cool: dude, wow -->\n'
EXPECTED_MULTIPLE = '<!-- @cool: dude -->\n<!-- @wow: ok -->\n'
r = form.wrap_metadata(('cool', 'dude'))
self.assertEqual(r, EXPECTED_SINGLE)
r = form.wrap_metadata(('cool', 'dude, wow'))
self.assertEqual(r, EXPECTED_SINGLE_COMMA)
r = form.get_metadata(('cool', 'wow'), TEST_DICT)
self.assertEqual(r, EXPECTED_MULTIPLE)
def test_radio_button_label(self):
"""Check that appropriate radio button label is returned."""
TEST_LABELS_LIST = [
(u'detection_bug', u'Desktop site instead of mobile site'),
(u'unknown_bug', u'Something else')
]
r = form.get_radio_button_label('unknown_bug', TEST_LABELS_LIST)
self.assertEqual(r, u'Something else')
r = form.get_radio_button_label(u'detection_bug', TEST_LABELS_LIST)
self.assertEqual(r, u'Desktop site instead of mobile site')
r = form.get_radio_button_label(None, TEST_LABELS_LIST)
self.assertEqual(r, u'Unknown')
r = form.get_radio_button_label('failme', TEST_LABELS_LIST)
self.assertEqual(r, u'Unknown')
def test_get_form(self):
"""Checks we return the right form with the appropriate data."""
with webcompat.app.test_request_context('/'):
actual = form.get_form(FIREFOX_UA)
expected_browser = 'Firefox 48.0'
expected_os = 'Mac OS X 10.11'
self.assertIsInstance(actual, form.IssueForm)
self.assertEqual(actual.browser.data, expected_browser)
self.assertEqual(actual.os.data, expected_os)
def test_get_metadata(self):
"""HTML comments need the right values depending on the keys."""
metadata_keys = ('sky', 'earth')
form_object = {'blah': 'goo', 'hello': 'moshi', 'sky': 'blue'}
actual = form.get_metadata(metadata_keys, form_object)
expected = u'<!-- @sky: blue -->\n<!-- @earth: None -->\n'
self.assertEqual(actual, expected)
form_object = MultiDict([
('reported_with', u'desktop-reporter'),
('url', u'http://localhost:5000/issues/new'),
('extra_labels', [u'type-stylo', u'type-webrender-enabled']),
('ua_header', u'Mozilla/5.0...Firefox 59.0'),
('browser', u'Firefox 59.0')])
metadata_keys = ['browser', 'ua_header', 'reported_with',
'extra_labels']
actual = form.get_metadata(metadata_keys, form_object)
expected = u'<!-- @browser: Firefox 59.0 -->\n<!-- @ua_header: Mozilla/5.0...Firefox 59.0 -->\n<!-- @reported_with: desktop-reporter -->\n<!-- @extra_labels: type-stylo, type-webrender-enabled -->\n' # nopep8
self.assertEqual(actual, expected)
def test_normalize_metadata(self):
"""Avoid some type of strings."""
cases = [('blue sky -->', 'blue sky'),
('blue sky ---->>', 'blue sky'),
('', ''),
('blue sky ', 'blue sky'),
('bad_bird <script>', ''),
('bad_bird <script-->>', ''),
('a' * 300, ''),
(None, None),
]
for meta_value, expected in cases:
self.assertEqual(form.normalize_metadata(meta_value), expected)
def test_build_formdata(self):
"""The data body sent to GitHub API."""
# we just need to test that nothing breaks
# even if the data are empty
form_object = {'foo': 'bar'}
actual = form.build_formdata(form_object)
expected = {'body': u'<!-- @browser: None -->\n<!-- @ua_header: None -->\n<!-- @reported_with: None -->\n\n**URL**: None\n\n**Browser / Version**: None\n**Operating System**: None\n**Tested Another Browser**: Unknown\n\n**Problem type**: Unknown\n**Description**: None\n**Steps to Reproduce**:\nNone\n\n\n\n_From [webcompat.com](https://webcompat.com/) with \u2764\ufe0f_', 'title': 'None - unknown'} # nopep8
self.assertIs(type(actual), dict)
self.assertEqual(actual, expected)
# testing for double URL Schemes.
form_object = {'url': 'http://https://example.com/'}
actual = form.build_formdata(form_object)
expected = {'body': u'<!-- @browser: None -->\n<!-- @ua_header: None -->\n<!-- @reported_with: None -->\n\n**URL**: https://example.com/\n\n**Browser / Version**: None\n**Operating System**: None\n**Tested Another Browser**: Unknown\n\n**Problem type**: Unknown\n**Description**: None\n**Steps to Reproduce**:\nNone\n\n\n\n_From [webcompat.com](https://webcompat.com/) with \u2764\ufe0f_', 'title': 'example.com - unknown'} # nopep8
self.assertEqual(actual, expected)
# testing with unicode strings.
form_object = {'url': u'愛'}
actual = form.build_formdata(form_object)
expected = {'body': u'<!-- @browser: None -->\n<!-- @ua_header: None -->\n<!-- @reported_with: None -->\n\n**URL**: http://\u611b\n\n**Browser / Version**: None\n**Operating System**: None\n**Tested Another Browser**: Unknown\n\n**Problem type**: Unknown\n**Description**: None\n**Steps to Reproduce**:\nNone\n\n\n\n_From [webcompat.com](https://webcompat.com/) with \u2764\ufe0f_', 'title': u'\u611b - unknown'} # nopep8
self.assertEqual(actual, expected)
def test_get_details(self):
"""Assert we handle valid JSON and other values."""
actual_string_arg = form.get_details('cool')
expected_string_arg = 'cool'
self.assertEqual(actual_string_arg, expected_string_arg)
actual_json_arg = form.get_details(json.dumps({'a': 'b', 'c': False}))
expected_json_arg = '<li>a: b</li><li>c: false</li>'
self.assertEqual(actual_json_arg, expected_json_arg)
def test_build_details(self):
"""Assert we return the expected HTML, for a json object or a string.
"""
actual_json_arg = form.build_details(json.dumps(
{'a': 'b', 'c': False}))
expected_json_arg = '<details>\n<summary>Browser Configuration</summary>\n<ul>\n <li>a: b</li><li>c: false</li>\n</ul>\n</details>' # nopep8
self.assertEqual(actual_json_arg, expected_json_arg)
actual_string_arg = form.build_details("cool")
expected_string_arg = '<details>\n<summary>Browser Configuration</summary>\n<ul>\n cool\n</ul>\n</details>' # nopep8
self.assertEqual(actual_string_arg, expected_string_arg)
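# Not part of the original module: an optional entry point so the suite can also be run
# directly with `python test_form.py` instead of through a separate test runner.
if __name__ == '__main__':
    unittest.main()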
|
the-stack_0_8690 | from typing import List, Union
import warnings
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from feature_engine.dataframe_checks import (
_is_dataframe,
_check_contains_na,
_check_input_matches_training_df,
)
from feature_engine.variable_manipulation import _find_or_check_categorical_variables
class BaseCategoricalTransformer(BaseEstimator, TransformerMixin):
"""shared set-up checks and methods across categorical transformers"""
def _check_fit_input_and_variables(self, X: pd.DataFrame) -> pd.DataFrame:
"""
Checks that input is a dataframe, finds categorical variables, or alternatively
checks that the variables entered by the user are of type object (categorical).
Checks absence of NA.
Parameters
----------
X : Pandas DataFrame
Raises
------
TypeError
If the input is not a Pandas DataFrame.
If any user provided variable is not categorical
ValueError
If there are no categorical variables in the df or the df is empty
If the variable(s) contain null values
Returns
-------
X : Pandas DataFrame
The same dataframe entered as parameter
variables : list
list of categorical variables
"""
# check input dataframe
X = _is_dataframe(X)
# find categorical variables or check variables entered by user are object
self.variables: List[Union[str, int]] = _find_or_check_categorical_variables(
X, self.variables
)
# check if dataset contains na
_check_contains_na(X, self.variables)
return X
def _check_transform_input_and_state(self, X: pd.DataFrame) -> pd.DataFrame:
"""
        Checks that the input is a dataframe and of the same size as the one used
        in the fit method. Checks absence of NA.
Parameters
----------
X : Pandas DataFrame
Raises
------
TypeError
If the input is not a Pandas DataFrame
ValueError
If the variable(s) contain null values.
If the dataframe is not of same size as that used in fit()
Returns
-------
X : Pandas DataFrame
The same dataframe entered by the user.
"""
# Check method fit has been called
check_is_fitted(self)
# check that input is a dataframe
X = _is_dataframe(X)
# check if dataset contains na
_check_contains_na(X, self.variables)
# Check input data contains same number of columns as df used to fit
_check_input_matches_training_df(X, self.input_shape_[1])
return X
def _check_encoding_dictionary(self):
"""After fit(), the encoders should return a dictionary with the original values
to numerical mappings as key, values. This function checks that the dictionary
was created and is not empty.
"""
# check that dictionary is not empty
if len(self.encoder_dict_) == 0:
raise ValueError(
"Encoder could not be fitted. Check the parameters and the variables "
"in your dataframe."
)
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""Replace categories with the learned parameters.
Parameters
----------
X : pandas dataframe of shape = [n_samples, n_features].
The dataset to transform.
Raises
------
TypeError
If the input is not a Pandas DataFrame
ValueError
- If the variable(s) contain null values
- If dataframe is not of same size as that used in fit()
Warning
If after encoding, NAN were introduced.
Returns
-------
X : pandas dataframe of shape = [n_samples, n_features].
The dataframe containing the categories replaced by numbers.
"""
X = self._check_transform_input_and_state(X)
# replace categories by the learned parameters
for feature in self.encoder_dict_.keys():
X[feature] = X[feature].map(self.encoder_dict_[feature])
# check if NaN values were introduced by the encoding
if X[self.encoder_dict_.keys()].isnull().sum().sum() > 0:
warnings.warn(
"NaN values were introduced in the returned dataframe by the encoder."
"This means that some of the categories in the input dataframe were "
"not present in the training set used when the fit method was called. "
"Thus, mappings for those categories do not exist. Try using the "
"RareLabelCategoricalEncoder to remove infrequent categories before "
"calling this encoder."
)
return X
def inverse_transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""Convert the encoded variable back to the original values.
Parameters
----------
X : pandas dataframe of shape = [n_samples, n_features].
The transformed dataframe.
Raises
------
TypeError
- If the input is not a Pandas DataFrame
ValueError
- If the variable(s) contain null values
- If the dataframe is not of same size as that used in fit()
Returns
-------
X : pandas dataframe of shape = [n_samples, n_features].
The un-transformed dataframe, with the categorical variables containing the
original values.
"""
X = self._check_transform_input_and_state(X)
# replace encoded categories by the original values
for feature in self.encoder_dict_.keys():
inv_map = {v: k for k, v in self.encoder_dict_[feature].items()}
X[feature] = X[feature].map(inv_map)
return X
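# Illustrative sketch only (not part of feature_engine): a minimal encoder built on top of
# the shared checks above. The mapping rule (alphabetically ordered integer codes) is an
# arbitrary example, not one of the library's real encoders.
class _DemoOrdinalEncoder(BaseCategoricalTransformer):
    def __init__(self, variables: Union[None, int, str, List[Union[str, int]]] = None) -> None:
        self.variables = variables

    def fit(self, X: pd.DataFrame, y: pd.Series = None):
        # run the shared input checks and variable discovery
        X = self._check_fit_input_and_variables(X)
        # assign an integer code to each category, per variable
        self.encoder_dict_ = {
            var: {category: code for code, category in enumerate(sorted(X[var].unique()))}
            for var in self.variables
        }
        self._check_encoding_dictionary()
        self.input_shape_ = X.shape
        return self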
|
the-stack_0_8691 | import os
from serde.json import from_json
from edge.command.common.precommand_check import precommand_checks
from edge.config import EdgeConfig
from edge.exception import EdgeException
from edge.state import EdgeState
from edge.train import TrainedModel
from edge.tui import TUI, StepTUI, SubStepTUI
from edge.vertex_deploy import vertex_deploy
from edge.path import get_model_dvc_pipeline, get_vertex_model_json
def model_deploy(model_name: str):
intro = f"Deploying model '{model_name}' on Vertex AI"
success_title = "Model deployed successfully"
success_message = "Success"
failure_title = "Model deployment failed"
failure_message = "See the errors above. See README for more details."
with EdgeConfig.context() as config:
with TUI(
intro,
success_title,
success_message,
failure_title,
failure_message
) as tui:
precommand_checks(config)
with EdgeState.context(config, to_lock=True, to_save=True) as state:
with StepTUI("Checking model configuration", emoji="🐏"):
with SubStepTUI("Checking that the model is initialised"):
if model_name not in config.models:
raise EdgeException("Model has not been initialised. "
f"Run `./edge.sh model init {model_name}` to initialise.")
if state.models is None or state.models[model_name] is None:
raise EdgeException("Model is missing from vertex:edge state. "
"This might mean that the model has not been initialised. "
f"Run `./edge.sh model init {model_name}` to initialise.")
endpoint_resource_name = state.models[model_name].endpoint_resource_name
with SubStepTUI("Checking that the model has been trained"):
if not os.path.exists(get_vertex_model_json(model_name)):
raise EdgeException(f"{get_vertex_model_json(model_name)} does not exist. "
"This means that the model has not been trained")
with open(get_vertex_model_json(model_name)) as file:
model = from_json(TrainedModel, file.read())
if model.is_local:
raise EdgeException("This model was trained locally, and hence cannot be deployed "
"on Vertex AI")
model_resource_name = model.model_name
vertex_deploy(endpoint_resource_name, model_resource_name, model_name)
state.models[model_name].deployed_model_resource_name = model_resource_name
short_endpoint_resource_name = "/".join(endpoint_resource_name.split("/")[2:])
tui.success_message = (
"You can see the deployed model at "
f"https://console.cloud.google.com/vertex-ai/"
f"{short_endpoint_resource_name}?project={config.google_cloud_project.project_id}\n\n"
"Happy herding! 🐏"
)
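# Hypothetical usage sketch (not part of the original file): the command is normally wired
# into the edge CLI elsewhere; invoked directly it would look roughly like this, with the
# model name taken from the command line ("my-model" is a made-up default).
if __name__ == "__main__":
    import sys

    model_deploy(sys.argv[1] if len(sys.argv) > 1 else "my-model")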
|
the-stack_0_8693 | """Interop with cc_* rules
These rules are temporary and will be deprecated in the future.
"""
load(":private/providers.bzl",
"HaskellBuildInfo",
"HaskellLibraryInfo",
"HaskellBinaryInfo",
"CcSkylarkApiProviderHacked",
)
load(":private/set.bzl", "set")
load("@bazel_skylib//:lib.bzl", "paths")
load(":private/path_utils.bzl", "ln")
CcInteropInfo = provider(
doc = "Information needed for interop with cc rules.",
fields = {
"hdrs": "CC headers",
"cpp_flags": "Preprocessor flags",
"include_args": "Extra include dirs",
}
)
def cc_headers(ctx):
"""Bring in scope the header files of dependencies, if any.
*Internal function - do not use.*
"""
hdrs = depset()
# XXX There's gotta be a better way to test the presence of
# CcSkylarkApiProvider.
ccs = [dep.cc for dep in ctx.attr.deps if hasattr(dep, "cc")]
hdrs = depset(transitive = [cc.transitive_headers for cc in ccs])
hdrs = depset(transitive = [hdrs] + [
# XXX cc_import doesn't produce a cc field, so we emulate it with a
# custom provider.
dep[CcSkylarkApiProviderHacked].transitive_headers
for dep in ctx.attr.deps if CcSkylarkApiProviderHacked in dep
])
include_directories = set.to_list(set.from_list(
[f for cc in ccs for f in cc.include_directories]
+ [f for dep in ctx.attr.deps if CcSkylarkApiProviderHacked in dep
for f in dep[CcSkylarkApiProviderHacked].include_directories]))
quote_include_directories = set.to_list(set.from_list(
[f for cc in ccs for f in cc.quote_include_directories]))
system_include_directories = set.to_list(set.from_list(
[f for cc in ccs for f in cc.system_include_directories]))
cpp_flags = (
["-D" + define for cc in ccs for define in cc.defines]
+ [f for include in quote_include_directories
for f in ["-iquote", include]]
+ [f for include in system_include_directories
for f in ["-isystem", include]])
include_args = ["-I" + include for include in include_directories]
return CcInteropInfo(
hdrs = hdrs.to_list(),
cpp_flags = cpp_flags,
include_args = include_args,
)
def _cc_import_impl(ctx):
strip_prefix = ctx.attr.strip_include_prefix
# cc_library's strip_include_prefix attribute accepts both absolute and
# relative paths. For simplicity we currently only implement absolute
# paths.
if strip_prefix.startswith("/"):
prefix = strip_prefix[1:]
else:
prefix = paths.join(ctx.label.workspace_root, ctx.label.package, strip_prefix)
roots = set.empty()
for f in ctx.files.hdrs:
# If it's a generated file, strip off the bin or genfiles prefix.
path = f.path
if path.startswith(ctx.bin_dir.path):
path = paths.relativize(path, ctx.bin_dir.path)
elif path.startswith(ctx.genfiles_dir.path):
path = paths.relativize(path, ctx.genfiles_dir.path)
if not path.startswith(prefix):
fail("Header {} does not have expected prefix {}".format(
path, prefix))
roots = set.insert(roots, f.root.path if f.root.path else ".")
include_directories = [paths.join(root, prefix) for root in set.to_list(roots)]
return [
DefaultInfo(files = depset(ctx.attr.shared_library.files)),
CcSkylarkApiProviderHacked(
transitive_headers =
depset(transitive = [l.files for l in ctx.attr.hdrs]),
include_directories = include_directories),
]
# XXX This is meant as a drop-in replacement for the native cc_import,
# but it's a temporary hack. It's only necessary because the native
# cc_import does not provide CcSkylarkApiProvider. So we write our own
# rule that does just that. See
# https://github.com/bazelbuild/bazel/issues/4369.
haskell_cc_import = rule(
_cc_import_impl,
attrs = {
"shared_library": attr.label(
# NOTE We do not list all extensions here because .so libraries may
# have numeric suffixes like foo.so.1.2.3, and if they also have
# SONAME with numeric suffix, matching file must be provided, so this
# attributes must accept libraries with almost arbitrary extensions.
# It would be easier if Skylark supported regexps.
allow_files = True,
doc = """A single precompiled shared library.
Bazel ensures it is available to the binary that depends on it
during runtime.
""",
),
"hdrs": attr.label_list(
allow_files = [".h"],
doc = """
The list of header files published by this precompiled library to be
directly included by sources in dependent rules.
""",
),
"strip_include_prefix": attr.string(
doc = """
The prefix to strip from the paths of the headers of this rule.
When set, the headers in the `hdrs` attribute of this rule are
accessible at their path (relative to the repository) with this
prefix cut off.
If it's a relative path, it's taken as a package-relative one. If it's an
absolute one, it's understood as a repository-relative path.
"""),
},
)
"""Imports a prebuilt shared library.
Use this to make `.so`, `.dll`, `.dylib` files residing in
[external repositories][bazel-ext-repos] available to Haskell rules.
*This rule is temporary replacement for [cc_import][cc_import] and
will be deprecated in the future.*
Example:
```bzl
haskell_cc_import(name = "zlib", shared_library = "@zlib//:lib")
haskell_binary(
name = "crc32sum",
srcs = ["Main.hs"],
deps = [":zlib"],
prebuilt_dependencies = ["base"],
)
```
[bazel-ext-repos]: https://docs.bazel.build/versions/master/external.html
[cc_import]: https://docs.bazel.build/versions/master/be/c-cpp.html#cc_import
"""
def _cc_haskell_import(ctx):
dyn_libs = set.empty()
if HaskellBuildInfo in ctx.attr.dep:
set.mutable_union(dyn_libs, ctx.attr.dep[HaskellBuildInfo].dynamic_libraries)
else:
fail("{0} has to provide `HaskellBuildInfo`".format(ctx.attr.dep.label.name))
if HaskellBinaryInfo in ctx.attr.dep:
bin = ctx.attr.dep[HaskellBinaryInfo].binary
dyn_lib = ctx.actions.declare_file("lib{0}.so".format(bin.basename))
ln(ctx, bin, dyn_lib)
set.mutable_insert(dyn_libs, dyn_lib)
return [
DefaultInfo(
files = set.to_depset(dyn_libs),
default_runfiles = ctx.runfiles(
files = ctx.attr.dep.default_runfiles.files.to_list(),
collect_default = True,
),
data_runfiles = ctx.runfiles(
files = ctx.attr.dep.data_runfiles.files.to_list(),
collect_data = True,
),
)
]
if HaskellBinaryInfo in ctx.attr.dep:
dbin = ctx.attr.dep[HaskellBinaryInfo].dynamic_bin
if dbin != None:
set.mutable_insert(dyn_libs, dbin)
return [
DefaultInfo(
files = set.to_depset(dyn_libs)
)
]
cc_haskell_import = rule(
_cc_haskell_import,
attrs = {
"dep": attr.label(
doc = """
Target providing a `HaskellLibraryInfo` or `HaskellBinaryInfo`, such as
`haskell_library` or `haskell_binary`.
"""
),
},
toolchains = ["@io_tweag_rules_haskell//haskell:toolchain"],
)
"""Exports a Haskell library as a CC library.
Given a [haskell_library](#haskell_library) or
[haskell_binary](#haskell_binary) input, outputs the shared object files
produced as well as the object files it depends on directly and
transitively. This is very useful if you want to link in a Haskell shared
library from `cc_library`.
There is a caveat: this will not provide any shared libraries that
aren't explicitly given to it. This means that if you're using
`prebuilt_dependencies` and relying on GHC to provide those objects,
they will not be present here. You will have to provide those
separately to your `cc_library`. If you're getting
`prebuilt_dependencies` from your toolchain, you will likely want to
extract those and pass them in as well.
*This rule is temporary and only needed until the Bazel C/C++
"sandwich" (see [bazelbuild/bazel#2163][bazel-cpp-sandwich]) is
implemented. This rule will be deprecated in the future.*
Example:
```bzl
haskell_library(
name = "my-lib",
...
)
cc_haskell_import(
name = "my-lib-objects",
dep = ":my-lib",
)
cc_library(
name = "my-cc",
srcs = ["main.c", ":my-lib-objects"],
)
```
[bazel-cpp-sandwich]: https://github.com/bazelbuild/bazel/issues/2163
"""
|
the-stack_0_8696 | # --------------
##File path for the file
file_path
def read_file(path):
file = open(path,mode='r')
sentence = file.readline()
file.close()
return sentence
sample_message = read_file(file_path)
#Code starts here
# --------------
#Code starts here
message_1 = read_file(file_path_1)
message_2 = read_file(file_path_2)
print(message_1)
print(message_2)
def fuse_msg(message_a,message_b):
message_a = int(message_a)
message_b = int(message_b)
quotient = message_b//message_a
return str(quotient)
secret_msg_1 = fuse_msg(message_1,message_2)
# --------------
#Code starts here
message_3 = read_file(file_path_3)
print(message_3)
def substitute_msg(message_c):
sub = " "
if message_c == 'Red':
sub = 'Army General'
elif message_c == 'Green':
sub = 'Data Scientist'
elif message_c == 'Blue':
sub = 'Marine Biologist'
return sub
secret_msg_2 = substitute_msg(message_3)
# --------------
# File path for message 4 and message 5
file_path_4
file_path_5
#Code starts here
message_4 = read_file(file_path_4)
message_5 = read_file(file_path_5)
print(message_4)
print(message_5)
def compare_msg(message_d,message_e):
a_list = message_d.split()
b_list = message_e.split()
c_list = [i for i in a_list if i not in b_list]
final_msg = ' '.join(c_list)
return final_msg
secret_msg_3 = compare_msg(message_4,message_5)
# --------------
#Code starts here
message_6 = read_file(file_path_6)
print(message_6)
def extract_msg(message_f):
    a_list = message_f.split()
even_word = lambda x:len(x)%2==0
b_list = filter(even_word,a_list)
final_msg = ' '.join(b_list)
return final_msg
secret_msg_4 = extract_msg(message_6)
# --------------
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]
final_path= user_data_dir + '/secret_message.txt'
#Code starts here
secret_msg = " ".join(message_parts)
def write_file(secret_msg,path):
file = open(path,mode='a+')
file.write(secret_msg)
file.close()
write_file(secret_msg,final_path)
print(secret_msg)
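# Illustrative check (not part of the original exercise): the helpers above can be exercised
# on made-up strings. The values below are hypothetical and unrelated to the real mission
# data, and the module itself still expects the platform-provided file_path_* variables.
if __name__ == "__main__":
    assert fuse_msg("4", "20") == "5"
    assert substitute_msg("Green") == "Data Scientist"
    assert compare_msg("the code is hidden here", "is hidden") == "the code here"
    assert extract_msg("once only even sized words stay") == "once only even stay"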
|
the-stack_0_8697 | #!/usr/bin/env python
import pytest
import random
import os
import filecmp
from devtools_shorthand_sql import core
random.seed(1234)
@pytest.fixture
def functionbuilder_basic():
fields = [core.IDField('id', 'test'), core.TextField('COL2', 'test2'),
core.IntegerField('col1', 'test')]
sql_writer = core.SQLiteWriter()
x = core.FunctionBuilder('my_table', fields, sql_writer)
return x
def test_base_function():
name, text = 'name', 'text'
base = core.BaseFunction(name, text)
assert base.name == name
assert base.text == text
assert base.__str__() == text
def test_sql_builder_properties():
fields = [core.IntegerField('col1', 'test'), core.TextField('COL2', 'test2')]
sql_writer = core.SQLiteWriter
x = core.FunctionBuilder('my_table', fields, sql_writer)
assert x.arguments == 'col1: int, col2: str'
assert x.field_names == 'col1, COL2'
assert x.params == 'col1, col2'
assert x.function_name_stem == 'my_table'
assert x.has_idfield is False
assert x.kwargs == 'col1=902, col2="ED73BYDMA9"'
def test_sql_builder_create_table_statement(functionbuilder_basic):
x = functionbuilder_basic
result = x.create_table_statement()
assert result == 'CREATE TABLE IF NOT EXISTS my_table (\nid test,\nCOL2 test2,\ncol1 test\n);'
def test_sql_builder_create_insert_function_with_id(functionbuilder_basic):
x = functionbuilder_basic
result = x.create_insert_function_with_id()
assert result.text == '\ndef insert_my_table(id: int, col2: str, col1: int) -> int:\n params = (id, col2, col1)\n id = YOUR_CONNECTOR_EXECUTOR("""INSERT INTO my_table (id, COL2, col1) VALUES(?,?,?);""",\n params)\n return id\n'
def test_sql_builder_create_insert_function_without_id(functionbuilder_basic):
x = functionbuilder_basic
result = x.create_insert_function_without_id()
assert result.text == '\ndef insert_my_table(id: int, col2: str, col1: int) -> None:\n params = (id, col2, col1)\n YOUR_CONNECTOR_EXECUTOR("""INSERT INTO my_table (id, COL2, col1) VALUES(?,?,?);""",\n params)\n return\n'
def test_sql_builder_create_insert_function_with_id_test(functionbuilder_basic):
expected = """
def test_insert_my_table(YOUR_CLEAN_DB_FIXTURE):
expected = (1, 'AXRQDZ4S5I', 954)
new_id = YOUR_MODULE.insert_my_table(col2="AXRQDZ4S5I", col1=954)
result = YOUR_CONNECTOR_QUERY('SELECT * FROM my_table').fetchall()[0]
assert result == expected
assert new_id == 1
"""
x = functionbuilder_basic
result = x.create_insert_function_with_id_test()
assert result.text == expected
def test_sql_builder_create_insert_function_without_id_test(functionbuilder_basic):
expected = """
def test_insert_my_table(YOUR_CLEAN_DB_FIXTURE):
expected = (1, 'CYSB3CK4JX', 409)
YOUR_MODULE.insert_my_table(col2="CYSB3CK4JX", col1=409)
result = YOUR_CONNECTOR_QUERY('SELECT * FROM my_table').fetchall()[0]
assert result == expected
"""
x = functionbuilder_basic
result = x.create_insert_function_without_id_test()
assert result.text == expected
@pytest.mark.parametrize("source,sql_name_format,fixture_file",
[
# Show none leaves sql columns unchange
("""# photo
id,id
SIZE,int
filename,text
date_taken,int""", 'none', 'basic_output.txt'),
# Show upper makes sql columns upper
("""# photo
id,id
size,int
filename,text
date_taken,int""", 'upper', 'basic_output_upper.txt'),
# Show lower makes sql columns lower
("""# photo
ID,id
SIZE,int
FILENAME,text
DATE_TAKEN,int""", 'lower', 'basic_output_lower.txt'),
# Show proper makes sql columns proper
("""# photo
ID,id
SIZE,int
FILENAME,text
DATE_TAKEN,int""", 'proper', 'basic_output_proper.txt'),
])
def test_main_pass(tmpdir, source, sql_name_format, fixture_file):
expected = os.path.join('tests', 'fixtures', fixture_file)
filename = os.path.join(tmpdir, 'shorthand.txt')
with open(filename, 'w') as f:
f.write(source)
output_filename = os.path.join(tmpdir, 'output.txt')
core.main(filename, 'sqlite', output_filename, sql_name_format)
if not filecmp.cmp(expected, output_filename):
import shutil
shutil.copy(output_filename, 'test_result.txt')
assert filecmp.cmp(expected, output_filename)
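# Hypothetical usage sketch (not part of the test suite): core.main, as exercised by the
# parametrised test above, can also be driven directly on a shorthand file. The file names
# and table definition below are made up.
if __name__ == '__main__':
    demo_shorthand = 'photo_shorthand.txt'
    with open(demo_shorthand, 'w') as f:
        f.write('# photo\nid,id\nsize,int\nfilename,text\n')
    core.main(demo_shorthand, 'sqlite', 'photo_output.txt', 'none')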
|
the-stack_0_8699 | from django.db import models
from django.contrib.postgres.fields import JSONField
class Log(models.Model):
started_on = models.DateTimeField(auto_now_add=True)
finished_on = models.DateTimeField(blank=True, null=True)
finished_successfully = models.NullBooleanField()
command_name = models.TextField()
args = JSONField(blank=True, null=True)
stdout = models.TextField(blank=True, null=True)
stderr = models.TextField(blank=True, null=True)
traceback = models.TextField(blank=True, null=True)
def save(
self, force_insert=False, force_update=False, using=None, update_fields=None
):
return super().save(
force_insert=force_insert,
force_update=force_update,
using=using,
update_fields=update_fields,
)
def __str__(self):
return f'Results of command "{self.command_name}" ran on {self.started_on}'
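# Hypothetical usage sketch (not part of the original module): a management command could
# record its run roughly like this. The command name and arguments are made up, and
# django.utils.timezone is assumed to be imported where this runs.
#
#     log = Log.objects.create(command_name="sync_reports", args={"dry_run": True})
#     try:
#         ...  # run the actual command, capturing stdout/stderr as needed
#         log.finished_successfully = True
#     except Exception:
#         log.finished_successfully = False
#         raise
#     finally:
#         log.finished_on = timezone.now()
#         log.save()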
|
the-stack_0_8700 | import csv
import re
from lxml.html import fromstring
class CsvCallback:
def __init__(self):
self.writer = csv.writer(open('../data/countries_or_districts.csv', 'w'))
self.fields = ('area', 'population', 'iso', 'country_or_district', 'capital',
'continent', 'tld', 'currency_code', 'currency_name',
'phone', 'postal_code_format', 'postal_code_regex',
'languages', 'neighbours')
self.writer.writerow(self.fields)
def __call__(self, url, html):
if re.search('/view/', url):
tree = fromstring(html)
all_rows = [
tree.xpath('//tr[@id="places_%s__row"]/td[@class="w2p_fw"]' % field)[0].text_content()
for field in self.fields]
self.writer.writerow(all_rows)
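# Illustrative sketch only (not part of the original script): the callback is meant to be
# handed to a crawler that invokes it per downloaded page. The URL and HTML below are
# made-up stand-ins, and writing the CSV assumes the ../data directory already exists.
if __name__ == '__main__':
    callback = CsvCallback()
    fake_html = '<table>%s</table>' % ''.join(
        '<tr id="places_%s__row"><td class="w2p_fw">example %s</td></tr>' % (field, field)
        for field in callback.fields
    )
    callback('http://example.webscraping.com/view/United-Kingdom-239', fake_html)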
|
the-stack_0_8705 | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@io_bazel_rules_rust//rust:private/utils.bzl", "relative_path")
load("@io_bazel_rules_rust//rust:private/legacy_cc_starlark_api_shim.bzl", "get_libs_for_static_executable")
load(
"@bazel_tools//tools/build_defs/cc:action_names.bzl",
"CPP_LINK_EXECUTABLE_ACTION_NAME",
)
load(
"@bazel_tools//tools/cpp:toolchain_utils.bzl",
"find_cpp_toolchain",
)
load("@bazel_skylib//lib:versions.bzl", "versions")
load("@bazel_version//:def.bzl", "BAZEL_VERSION")
CrateInfo = provider(
fields = {
"name": "str: The name of this crate.",
"type": "str: The type of this crate. eg. lib or bin",
"root": "File: The source File entrypoint to this crate, eg. lib.rs",
"srcs": "List[File]: All source Files that are part of the crate.",
"deps": "List[Provider]: This crate's (rust or cc) dependencies' providers.",
"proc_macro_deps": "List[CrateInfo]: This crate's rust proc_macro dependencies' providers.",
"aliases": "Dict[Label, String]: Renamed and aliased crates",
"output": "File: The output File that will be produced, depends on crate type.",
"edition": "str: The edition of this crate.",
"rustc_env": """Dict[String, String]: Additional `"key": "value"` environment variables to set for rustc.""",
},
)
BuildInfo = provider(
fields = {
"flags": """File: file containing additional flags to pass to rustc""",
"out_dir": """File: directory containing the result of a build script""",
"rustc_env": """File: file containing additional environment variables to set for rustc.""",
"dep_env": """File: extra build script environment varibles to be set to direct dependencies.""",
"link_flags": """File: file containing flags to pass to the linker""",
},
)
AliasableDep = provider(
fields = {
"name": "str",
"dep": "CrateInfo",
},
)
DepInfo = provider(
fields = {
"direct_crates": "depset[CrateInfo]",
"transitive_crates": "depset[CrateInfo]",
"transitive_dylibs": "depset[File]",
"transitive_staticlibs": "depset[File]",
"transitive_libs": "List[File]: All transitive dependencies, not filtered by type.",
"transitive_build_infos": "depset[BuildInfo]",
"dep_env": """File: File with environment variables direct dependencies build scripts rely upon.""",
},
)
def _get_rustc_env(ctx, toolchain):
version = ctx.attr.version if hasattr(ctx.attr, "version") else "0.0.0"
major, minor, patch = version.split(".", 2)
if "-" in patch:
patch, pre = patch.split("-", 1)
else:
pre = ""
return {
"CARGO_PKG_VERSION": version,
"CARGO_PKG_VERSION_MAJOR": major,
"CARGO_PKG_VERSION_MINOR": minor,
"CARGO_PKG_VERSION_PATCH": patch,
"CARGO_PKG_VERSION_PRE": pre,
"CARGO_PKG_AUTHORS": "",
"CARGO_PKG_NAME": ctx.label.name,
"CARGO_PKG_DESCRIPTION": "",
"CARGO_PKG_HOMEPAGE": "",
"CARGO_CFG_TARGET_OS": toolchain.os,
"CARGO_CFG_TARGET_ARCH": toolchain.target_arch,
}
def get_compilation_mode_opts(ctx, toolchain):
comp_mode = ctx.var["COMPILATION_MODE"]
if not comp_mode in toolchain.compilation_mode_opts:
fail("Unrecognized compilation mode {} for toolchain.".format(comp_mode))
return toolchain.compilation_mode_opts[comp_mode]
def get_lib_name(lib):
"""Returns the name of a library artifact, eg. libabc.a -> abc"""
libname, ext = lib.basename.split(".", 2)
if libname.startswith("lib"):
return libname[3:]
else:
return libname
def collect_deps(label, deps, proc_macro_deps, aliases, toolchain):
"""
Walks through dependencies and collects the transitive dependencies.
Args:
        label: str: Label of the current target.
        deps: List[Label]: The deps from ctx.attr.deps.
        proc_macro_deps: List[Label]: The proc_macro deps from ctx.attr.proc_macro_deps.
        aliases: Dict[Label, String]: Renamed and aliased crates, from ctx.attr.aliases.
        toolchain: The rust toolchain in use for this build.
Returns:
Returns a DepInfo provider.
"""
for dep in deps:
if CrateInfo in dep:
if dep[CrateInfo].type == "proc-macro":
fail(
"{} listed {} in its deps, but it is a proc-macro. It should instead be in proc-macro-deps.".format(
label,
dep.label,
)
)
for dep in proc_macro_deps:
type = dep[CrateInfo].type
if type != "proc-macro":
fail(
"{} listed {} in its proc_macro_deps, but it is not proc-macro, it is a {}. It should probably instead be listed in deps.".format(
label,
dep.label,
type,
)
)
# TODO: Fix depset union (https://docs.bazel.build/versions/master/skylark/depsets.html)
direct_crates = []
transitive_crates = depset()
transitive_dylibs = depset(order = "topological") # dylib link flag ordering matters.
transitive_staticlibs = depset()
transitive_build_infos = depset()
build_info = None
aliases = {k.label: v for k,v in aliases.items()}
for dep in deps + proc_macro_deps:
if CrateInfo in dep:
# This dependency is a rust_library
direct_dep = dep[CrateInfo]
aliasable_dep = AliasableDep(
name = aliases.get(dep.label, direct_dep.name),
dep = direct_dep,
)
direct_crates += [aliasable_dep]
transitive_crates = depset([dep[CrateInfo]], transitive = [transitive_crates])
transitive_crates = depset(transitive = [transitive_crates, dep[DepInfo].transitive_crates])
transitive_dylibs = depset(transitive = [transitive_dylibs, dep[DepInfo].transitive_dylibs])
transitive_staticlibs = depset(transitive = [transitive_staticlibs, dep[DepInfo].transitive_staticlibs])
transitive_build_infos = depset(transitive = [transitive_build_infos, dep[DepInfo].transitive_build_infos])
elif CcInfo in dep:
# This dependency is a cc_library
# TODO: We could let the user choose how to link, instead of always preferring to link static libraries.
libs = get_libs_for_static_executable(dep)
dylibs = [l for l in libs.to_list() if l.basename.endswith(toolchain.dylib_ext)]
staticlibs = [l for l in libs.to_list() if l.basename.endswith(toolchain.staticlib_ext)]
transitive_dylibs = depset(transitive = [transitive_dylibs, depset(dylibs)])
transitive_staticlibs = depset(transitive = [transitive_staticlibs, depset(staticlibs)])
elif BuildInfo in dep:
if build_info:
fail("Several deps are providing build information, only one is allowed in the dependencies", "deps")
build_info = dep[BuildInfo]
transitive_build_infos = depset([build_info], transitive = [transitive_build_infos])
else:
fail("rust targets can only depend on rust_library, rust_*_library or cc_library targets." + str(dep), "deps")
transitive_libs = depset(
[c.output for c in transitive_crates.to_list()],
transitive = [transitive_staticlibs, transitive_dylibs],
)
return (
DepInfo(
direct_crates = depset(direct_crates),
transitive_crates = transitive_crates,
transitive_dylibs = transitive_dylibs,
transitive_staticlibs = transitive_staticlibs,
transitive_libs = transitive_libs.to_list(),
transitive_build_infos = transitive_build_infos,
dep_env = build_info.dep_env if build_info else None,
),
build_info,
)
def get_linker_and_args(ctx, rpaths):
if (len(BAZEL_VERSION) == 0 or
versions.is_at_least("0.18.0", BAZEL_VERSION)):
user_link_flags = ctx.fragments.cpp.linkopts
else:
user_link_flags = depset(ctx.fragments.cpp.linkopts)
cc_toolchain = find_cpp_toolchain(ctx)
kwargs = {
"ctx": ctx,
} if len(BAZEL_VERSION) == 0 or versions.is_at_least(
"0.25.0",
BAZEL_VERSION,
) else {}
feature_configuration = cc_common.configure_features(
cc_toolchain = cc_toolchain,
requested_features = ctx.features,
unsupported_features = ctx.disabled_features,
**kwargs
)
link_variables = cc_common.create_link_variables(
feature_configuration = feature_configuration,
cc_toolchain = cc_toolchain,
is_linking_dynamic_library = False,
runtime_library_search_directories = rpaths,
user_link_flags = user_link_flags,
)
link_args = cc_common.get_memory_inefficient_command_line(
feature_configuration = feature_configuration,
action_name = CPP_LINK_EXECUTABLE_ACTION_NAME,
variables = link_variables,
)
link_env = cc_common.get_environment_variables(
feature_configuration = feature_configuration,
action_name = CPP_LINK_EXECUTABLE_ACTION_NAME,
variables = link_variables,
)
ld = cc_common.get_tool_for_action(
feature_configuration = feature_configuration,
action_name = CPP_LINK_EXECUTABLE_ACTION_NAME,
)
return ld, link_args, link_env
def _process_build_scripts(
ctx,
file,
crate_info,
build_info,
dep_info,
compile_inputs):
extra_inputs, prep_commands, dynamic_env, dynamic_build_flags = _create_out_dir_action(ctx, file, build_info, dep_info)
if extra_inputs:
compile_inputs = depset(extra_inputs, transitive = [compile_inputs])
return compile_inputs, prep_commands, dynamic_env, dynamic_build_flags
def collect_inputs(
ctx,
file,
files,
toolchain,
crate_info,
dep_info,
build_info):
linker_script = getattr(file, "linker_script") if hasattr(file, "linker_script") else None
if (len(BAZEL_VERSION) == 0 or
versions.is_at_least("0.25.0", BAZEL_VERSION)):
linker_depset = find_cpp_toolchain(ctx).all_files
else:
linker_depset = depset(files._cc_toolchain)
compile_inputs = depset(
crate_info.srcs +
getattr(files, "data", []) +
dep_info.transitive_libs +
[toolchain.rustc] +
toolchain.crosstool_files +
([build_info.rustc_env, build_info.flags] if build_info else []) +
([] if linker_script == None else [linker_script]),
transitive = [
toolchain.rustc_lib.files,
toolchain.rust_lib.files,
linker_depset,
],
)
return _process_build_scripts(ctx, file, crate_info, build_info, dep_info, compile_inputs)
def construct_arguments(
ctx,
file,
toolchain,
crate_info,
dep_info,
output_hash,
rust_flags,
dynamic_env):
output_dir = getattr(crate_info.output, "dirname") if hasattr(crate_info.output, "dirname") else None
linker_script = getattr(file, "linker_script") if hasattr(file, "linker_script") else None
env = _get_rustc_env(ctx, toolchain)
args = ctx.actions.args()
args.add(crate_info.root)
args.add("--crate-name=" + crate_info.name)
args.add("--crate-type=" + crate_info.type)
# Mangle symbols to disambiguate crates with the same name
extra_filename = "-" + output_hash if output_hash else ""
args.add("--codegen=metadata=" + extra_filename)
if output_dir:
args.add("--out-dir=" + output_dir)
args.add("--codegen=extra-filename=" + extra_filename)
compilation_mode = get_compilation_mode_opts(ctx, toolchain)
args.add("--codegen=opt-level=" + compilation_mode.opt_level)
args.add("--codegen=debuginfo=" + compilation_mode.debug_info)
args.add("--emit=dep-info,link")
args.add("--color=always")
args.add("--target=" + toolchain.target_triple)
if hasattr(ctx.attr, "crate_features"):
args.add_all(getattr(ctx.attr, "crate_features"), before_each = "--cfg", format_each = 'feature="%s"')
if linker_script:
args.add(linker_script.path, format = "--codegen=link-arg=-T%s")
# Gets the paths to the folders containing the standard library (or libcore)
rust_lib_paths = depset([file.dirname for file in toolchain.rust_lib.files.to_list()]).to_list()
# Tell Rustc where to find the standard library
args.add_all(rust_lib_paths, before_each = "-L", format_each = "%s")
args.add_all(rust_flags)
args.add_all(getattr(ctx.attr, "rustc_flags", []))
add_edition_flags(args, crate_info)
# Link!
# Rust's built-in linker can handle linking wasm files. We don't want to attempt to use the cc
# linker since it won't understand.
if toolchain.target_arch != "wasm32":
rpaths = _compute_rpaths(toolchain, output_dir, dep_info)
ld, link_args, link_env = get_linker_and_args(ctx, rpaths)
env.update(link_env)
args.add("--codegen=linker=" + ld)
args.add_joined("--codegen", link_args, join_with = " ", format_joined = "link-args=%s")
add_native_link_flags(args, dep_info)
add_crate_link_flags(args, dep_info)
# Make bin crate data deps available to tests.
for data in getattr(ctx.attr, "data", []):
if CrateInfo in data:
dep_crate_info = data[CrateInfo]
if dep_crate_info.type == "bin":
env["CARGO_BIN_EXE_" + dep_crate_info.output.basename] = dep_crate_info.output.short_path
# Update environment with user provided variables.
env.update(crate_info.rustc_env)
# This empty value satisfies Clippy, which otherwise complains about the
# sysroot being undefined.
env["SYSROOT"] = ""
# Certain rust build processes expect to find files from the environment variable
# `$CARGO_MANIFEST_DIR`. Examples of this include pest, tera, asakuma.
#
# The compiler and by extension proc-macros see the current working directory as the Bazel exec
# root. Therefore, in order to fix this without an upstream code change, we have to set
# `$CARGO_MANIFEST_DIR`.
#
# As such we attempt to infer `$CARGO_MANIFEST_DIR`.
# Inference cannot be derived from `attr.crate_root`, as this points at a source file which may or
# may not follow the `src/lib.rs` convention. As such we use `ctx.build_file_path` mapped into the
# `exec_root`. Since we cannot (seemingly) get the `exec_root` from skylark, we cheat a little
# and use `$(pwd)` which resolves the `exec_root` at action execution time.
package_dir = ctx.build_file_path[:ctx.build_file_path.rfind("/")]
dynamic_env["CARGO_MANIFEST_DIR"] = "${{EXEC_ROOT}}/{}".format(package_dir)
return args, env, dynamic_env
def construct_compile_command(
ctx,
command,
toolchain,
crate_info,
build_info,
dep_info,
prep_commands,
dynamic_env,
dynamic_build_flags):
# Handle that the binary name and crate name may be different.
#
# If a target name contains a - then cargo (and rules_rust) will generate a
# crate name with _ instead. Accordingly, rustc will generate a output
# file (executable, or rlib, or whatever) with _ not -. But when cargo
# puts a binary in the target/${config} directory, and sets environment
# variables like `CARGO_BIN_EXE_${binary_name}` it will use the - version
# not the _ version. So we rename the rustc-generated file (with _s) to
# have -s if needed.
maybe_rename = ""
if crate_info.type == "bin" and crate_info.output != None:
generated_file = crate_info.name
if toolchain.target_arch == "wasm32":
generated_file = generated_file + ".wasm"
src = "/".join([crate_info.output.dirname, generated_file])
dst = crate_info.output.path
if src != dst:
maybe_rename = " && /bin/mv {src} {dst}".format(src=src, dst=dst)
# Set ${EXEC_ROOT} so that actions which chdir still work.
# See https://github.com/google/cargo-raze/issues/71#issuecomment-433225853 for the rationale as
# to why.
return 'export EXEC_ROOT=$(pwd) && {} && {} "$@" --remap-path-prefix="$(pwd)"=__bazel_redacted_pwd {}{}'.format(
" && ".join(["export {}={}".format(key, value) for key, value in dynamic_env.items()] + prep_commands),
command,
" ".join(dynamic_build_flags),
maybe_rename,
)
def rustc_compile_action(
ctx,
toolchain,
crate_info,
output_hash = None,
rust_flags = []):
"""
Constructs the rustc command used to build the current target.
Returns:
List[Provider]: A list of the following providers:
- CrateInfo: info for the crate we just built; same as `crate_info` parameter.
- DepInfo: The transitive dependencies of this crate.
- DefaultInfo: The output file for this crate, and its runfiles.
"""
dep_info, build_info = collect_deps(
ctx.label,
crate_info.deps,
crate_info.proc_macro_deps,
crate_info.aliases,
toolchain,
)
compile_inputs, prep_commands, dynamic_env, dynamic_build_flags = collect_inputs(
ctx,
ctx.file,
ctx.files,
toolchain,
crate_info,
dep_info,
build_info,
)
args, env, dynamic_env = construct_arguments(
ctx,
ctx.file,
toolchain,
crate_info,
dep_info,
output_hash,
rust_flags,
dynamic_env,
)
command = construct_compile_command(
ctx,
toolchain.rustc.path,
toolchain,
crate_info,
build_info,
dep_info,
prep_commands,
dynamic_env,
dynamic_build_flags,
)
if hasattr(ctx.attr, "version") and ctx.attr.version != "0.0.0":
formatted_version = " v{}".format(ctx.attr.version)
else:
formatted_version = ""
ctx.actions.run_shell(
command = command,
inputs = compile_inputs,
outputs = [crate_info.output],
env = env,
arguments = [args],
mnemonic = "Rustc",
progress_message = "Compiling Rust {} {}{} ({} files)".format(
crate_info.type,
ctx.label.name,
formatted_version,
len(crate_info.srcs),
),
)
runfiles = ctx.runfiles(
files = dep_info.transitive_dylibs.to_list() + getattr(ctx.files, "data", []),
collect_data = True,
)
out_binary = False
if hasattr(ctx.attr, "out_binary"):
out_binary = getattr(ctx.attr, "out_binary")
return [
crate_info,
dep_info,
DefaultInfo(
# nb. This field is required for cc_library to depend on our output.
files = depset([crate_info.output]),
runfiles = runfiles,
executable = crate_info.output if crate_info.type == "bin" or out_binary else None,
),
]
def add_edition_flags(args, crate):
if crate.edition != "2015":
args.add("--edition={}".format(crate.edition))
def _create_out_dir_action(ctx, file, build_info, dep_info):
tar_file_attr = getattr(file, "out_dir_tar", None)
if build_info and tar_file_attr:
fail("Target {} has both a build_script dependency and an out_dir_tar - this is not allowed.".format(ctx.label))
prep_commands = []
input_files = []
# Env vars and build flags which need to be set in the action's command line, rather than on the action's env,
# because they rely on other env vars or commands.
dynamic_env = {}
dynamic_build_flags = []
# TODO: Remove system tar usage
if build_info:
prep_commands.append("export $(cat %s)" % build_info.rustc_env.path)
# out_dir will be added as input by the transitive_build_infos loop below.
dynamic_env["OUT_DIR"] = "${{EXEC_ROOT}}/{}".format(build_info.out_dir.path)
dynamic_build_flags.append("$(cat '%s')" % build_info.flags.path)
elif tar_file_attr:
out_dir = ".out-dir"
prep_commands.append("mkdir -p $OUT_DIR")
prep_commands.append("tar -xzf {tar} -C $OUT_DIR".format(tar=tar_file_attr.path))
input_files.append(tar_file_attr)
dynamic_env["OUT_DIR"] = "${{EXEC_ROOT}}/{}".format(out_dir)
# This should probably only actually be exposed to actions which link.
for dep_build_info in dep_info.transitive_build_infos.to_list():
input_files.append(dep_build_info.out_dir)
dynamic_build_flags.append("$(cat '{}' | sed -e \"s#\${{EXEC_ROOT}}#${{EXEC_ROOT}}#g\")".format(dep_build_info.link_flags.path))
input_files.append(dep_build_info.link_flags)
return input_files, prep_commands, dynamic_env, dynamic_build_flags
def _compute_rpaths(toolchain, output_dir, dep_info):
"""
Determine the artifact's rpaths relative to the bazel root
for runtime linking of shared libraries.
"""
if not dep_info.transitive_dylibs:
return depset([])
if toolchain.os != "linux":
fail("Runtime linking is not supported on {}, but found {}".format(
toolchain.os,
dep_info.transitive_dylibs,
))
# Multiple dylibs can be present in the same directory, so deduplicate them.
return depset([
relative_path(output_dir, lib_dir)
for lib_dir in _get_dir_names(dep_info.transitive_dylibs.to_list())
])
def _get_dir_names(files):
dirs = {}
for f in files:
dirs[f.dirname] = None
return dirs.keys()
def add_crate_link_flags(args, dep_info):
# nb. Crates are linked via --extern regardless of their crate_type
args.add_all(dep_info.direct_crates, map_each = _crate_to_link_flag)
args.add_all(
dep_info.transitive_crates,
map_each = _get_crate_dirname,
uniquify = True,
format_each = "-Ldependency=%s",
)
def _crate_to_link_flag(crate_info):
return ["--extern", "{}={}".format(crate_info.name, crate_info.dep.output.path)]
def _get_crate_dirname(crate):
return crate.output.dirname
def add_native_link_flags(args, dep_info):
native_libs = depset(transitive = [dep_info.transitive_dylibs, dep_info.transitive_staticlibs])
args.add_all(native_libs, map_each = _get_dirname, uniquify = True, format_each = "-Lnative=%s")
args.add_all(dep_info.transitive_dylibs, map_each = get_lib_name, format_each = "-ldylib=%s")
args.add_all(dep_info.transitive_staticlibs, map_each = get_lib_name, format_each = "-lstatic=%s")
def _get_dirname(file):
return file.dirname
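# Hypothetical sketch (not part of the original file): a minimal rule implementation would
# assemble a CrateInfo and hand it to rustc_compile_action, roughly as below. The attribute
# names, toolchain label and output naming are illustrative assumptions only.
#
#   def _demo_rust_library_impl(ctx):
#       toolchain = ctx.toolchains["@io_bazel_rules_rust//rust:toolchain"]
#       crate_name = ctx.label.name.replace("-", "_")
#       return rustc_compile_action(
#           ctx = ctx,
#           toolchain = toolchain,
#           crate_info = CrateInfo(
#               name = crate_name,
#               type = "rlib",
#               root = ctx.file.crate_root,
#               srcs = ctx.files.srcs,
#               deps = ctx.attr.deps,
#               proc_macro_deps = [],
#               aliases = {},
#               output = ctx.actions.declare_file("lib" + crate_name + ".rlib"),
#               edition = "2018",
#               rustc_env = {},
#           ),
#       )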
|
the-stack_0_8706 | from panda3d.core import TextNode
from direct.gui.DirectGui import DirectFrame
from direct.gui.DirectGui import DirectButton
from direct.gui.DirectGui import DirectLabel
from direct.gui import DirectGuiGlobals
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
class JellybeanRewardGui(DirectFrame):
notify = directNotify.newCategory('JellybeanRewardGui')
PreCountdownDelay = 1.0
CountDownRate = 0.2
JarLabelTextColor = (0.95, 0.95, 0.0, 1.0)
JarLabelMaxedTextColor = (1.0, 0.0, 0.0, 1.0)
def __init__(self, doneEvent):
self.doneEvent = doneEvent
DirectFrame.__init__(self)
self.reparentTo(aspect2d)
self.setPos(0.0, 0.0, 0.16)
self.stash()
publicPartyGui = loader.loadModel('phase_4/models/parties/publicPartyGUI')
self.frame = DirectFrame(parent=self, geom=publicPartyGui.find('**/activities_background'), geom_pos=(-0.8, 0.0, 0.2), geom_scale=2.0, relief=None)
self.earnedLabel = DirectLabel(parent=self, relief=None, text=str(0), text_align=TextNode.ACenter, text_pos=(0.0, -0.07), text_scale=0.2, text_fg=(0.95, 0.95, 0.0, 1.0), text_font=ToontownGlobals.getSignFont(), textMayChange=True, image=DirectGuiGlobals.getDefaultDialogGeom(), image_scale=(0.33, 1.0, 0.33), pos=(-0.3, 0.0, 0.2), scale=0.9)
purchaseModels = loader.loadModel('phase_4/models/gui/purchase_gui')
jarImage = purchaseModels.find('**/Jar')
self.jarLabel = DirectLabel(parent=self, relief=None, text=str(0), text_align=TextNode.ACenter, text_pos=(0.0, -0.07), text_scale=0.2, text_fg=JellybeanRewardGui.JarLabelTextColor, text_font=ToontownGlobals.getSignFont(), textMayChange=True, image=jarImage, scale=0.7, pos=(0.3, 0.0, 0.17))
purchaseModels.removeNode()
del purchaseModels
jarImage.removeNode()
del jarImage
self.messageLabel = DirectLabel(parent=self, relief=None, text='', text_align=TextNode.ALeft, text_wordwrap=16.0, text_scale=0.07, pos=(-0.52, 0.0, -0.1), textMayChange=True)
self.doubledJellybeanLabel = DirectLabel(parent=self, relief=None, text=TTLocalizer.PartyRewardDoubledJellybean, text_align=TextNode.ACenter, text_wordwrap=12.0, text_scale=0.09, text_fg=(1.0, 0.125, 0.125, 1.0), pos=(0.0, 0.0, -0.465), textMayChange=False)
self.doubledJellybeanLabel.hide()
self.closeButton = DirectButton(parent=self, relief=None, text=TTLocalizer.PartyJellybeanRewardOK, text_align=TextNode.ACenter, text_scale=0.065, text_pos=(0.0, -0.625), geom=(publicPartyGui.find('**/startButton_up'),
publicPartyGui.find('**/startButton_down'),
publicPartyGui.find('**/startButton_rollover'),
publicPartyGui.find('**/startButton_inactive')), geom_pos=(-0.39, 0.0, 0.125), command=self._close)
publicPartyGui.removeNode()
del publicPartyGui
self.countSound = base.loader.loadSfx('phase_13/audio/sfx/tick_counter_short.ogg')
self.overMaxSound = base.loader.loadSfx('phase_13/audio/sfx/tick_counter_overflow.ogg')
return
def showReward(self, earnedAmount, jarAmount, message):
JellybeanRewardGui.notify.debug('showReward( earnedAmount=%d, jarAmount=%d, ...)' % (earnedAmount, jarAmount))
self.earnedCount = earnedAmount
self.earnedLabel['text'] = str(self.earnedCount)
self.jarCount = jarAmount
self.jarMax = base.localAvatar.getMaxMoney()
self.jarLabel['text'] = str(self.jarCount)
self.jarLabel['text_fg'] = JellybeanRewardGui.JarLabelTextColor
self.messageLabel['text'] = message
if base.cr.newsManager.isHolidayRunning(ToontownGlobals.JELLYBEAN_DAY) or base.cr.newsManager.isHolidayRunning(ToontownGlobals.JELLYBEAN_PARTIES_HOLIDAY) or base.cr.newsManager.isHolidayRunning(ToontownGlobals.JELLYBEAN_PARTIES_HOLIDAY_MONTH):
self.doubledJellybeanLabel.show()
else:
self.doubledJellybeanLabel.hide()
self.unstash()
taskMgr.doMethodLater(JellybeanRewardGui.PreCountdownDelay, self.transferOneJellybean, 'JellybeanRewardGuiTransferOneJellybean', extraArgs=[])
def transferOneJellybean(self):
if self.earnedCount == 0:
return
self.earnedCount -= 1
self.earnedLabel['text'] = str(self.earnedCount)
self.jarCount += 1
if self.jarCount <= self.jarMax:
self.jarLabel['text'] = str(self.jarCount)
elif self.jarCount > self.jarMax:
self.jarLabel['text_fg'] = JellybeanRewardGui.JarLabelMaxedTextColor
if self.jarCount <= self.jarMax:
base.playSfx(self.countSound)
else:
base.playSfx(self.overMaxSound)
taskMgr.doMethodLater(JellybeanRewardGui.CountDownRate, self.transferOneJellybean, 'JellybeanRewardGuiTransferOneJellybean', extraArgs=[])
def _close(self):
taskMgr.remove('JellybeanRewardGuiTransferOneJellybean')
self.stash()
messenger.send(self.doneEvent)
def destroy(self):
taskMgr.remove('JellybeanRewardGuiTransferOneJellybean')
del self.countSound
del self.overMaxSound
self.frame.destroy()
self.earnedLabel.destroy()
self.jarLabel.destroy()
self.messageLabel.destroy()
self.closeButton.destroy()
DirectFrame.destroy(self)
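# Usage sketch (an assumption, not part of the original file): how this panel is
# typically driven from inside a running Toontown client, where the Panda3D /
# Toontown builtins (base, loader, aspect2d, taskMgr, messenger) already exist.
# The event name and the amounts passed to showReward() are illustrative only.
def _demoJellybeanReward():
    doneEvent = 'jellybeanRewardDone'  # hypothetical event name
    gui = JellybeanRewardGui(doneEvent)

    def onDone():
        gui.destroy()  # tear the panel down once the player closes it

    base.acceptOnce(doneEvent, onDone)  # _close() sends doneEvent via messenger
    # 25 beans earned this activity, 120 already in the jar, plus a message:
    gui.showReward(25, 120, 'You earned jellybeans at the party!')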
|
the-stack_0_8708 | #!/usr/bin/env python
''' Python DB API 2.0 driver compliance unit test suite.
This software is Public Domain and may be used without restrictions.
"Now we have booze and barflies entering the discussion, plus rumours of
DBAs on drugs... and I won't tell you what flashes through my mind each
time I read the subject line with 'Anal Compliance' in it. All around
this is turning out to be a thoroughly unwholesome unit test."
-- Ian Bicking
'''
__rcs_id__ = '$Id: dbapi20.py,v 1.11 2005/01/02 02:41:01 zenzen Exp $'
__version__ = '$Revision: 1.12 $'[11:-2]
__author__ = 'Stuart Bishop <[email protected]>'
import unittest
import time
import sys
# Revision 1.12 2009/02/06 03:35:11 kf7xm
# Tested okay with Python 3.0, includes last minute patches from Mark H.
#
# Revision 1.1.1.1.2.1 2008/09/20 19:54:59 rupole
# Include latest changes from main branch
# Updates for py3k
#
# Revision 1.11 2005/01/02 02:41:01 zenzen
# Update author email address
#
# Revision 1.10 2003/10/09 03:14:14 zenzen
# Add test for DB API 2.0 optional extension, where database exceptions
# are exposed as attributes on the Connection object.
#
# Revision 1.9 2003/08/13 01:16:36 zenzen
# Minor tweak from Stefan Fleiter
#
# Revision 1.8 2003/04/10 00:13:25 zenzen
# Changes, as per suggestions by M.-A. Lemburg
# - Add a table prefix, to ensure namespace collisions can always be avoided
#
# Revision 1.7 2003/02/26 23:33:37 zenzen
# Break out DDL into helper functions, as per request by David Rushby
#
# Revision 1.6 2003/02/21 03:04:33 zenzen
# Stuff from Henrik Ekelund:
# added test_None
# added test_nextset & hooks
#
# Revision 1.5 2003/02/17 22:08:43 zenzen
# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize
# defaults to 1 & generic cursor.callproc test added
#
# Revision 1.4 2003/02/15 00:16:33 zenzen
# Changes, as per suggestions and bug reports by M.-A. Lemburg,
# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
# - Class renamed
# - Now a subclass of TestCase, to avoid requiring the driver stub
# to use multiple inheritance
# - Reversed the polarity of buggy test in test_description
# - Test exception hierarchy correctly
# - self.populate is now self._populate(), so if a driver stub
# overrides self.ddl1 this change propagates
# - VARCHAR columns now have a width, which will hopefully make the
# DDL even more portible (this will be reversed if it causes more problems)
# - cursor.rowcount being checked after various execute and fetchXXX methods
# - Check for fetchall and fetchmany returning empty lists after results
# are exhausted (already checking for empty lists if select retrieved
# nothing
# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
#
def str2bytes(sval):
if sys.version_info < (3,0) and isinstance(sval, str):
sval = sval.decode("latin1")
return sval.encode("latin1")
class DatabaseAPI20Test(unittest.TestCase):
''' Test a database self.driver for DB API 2.0 compatibility.
This implementation tests Gadfly, but the TestCase
is structured so that other self.drivers can subclass this
test case to ensure compiliance with the DB-API. It is
expected that this TestCase may be expanded in the future
if ambiguities or edge conditions are discovered.
The 'Optional Extensions' are not yet being tested.
self.drivers should subclass this test, overriding setUp, tearDown,
self.driver, connect_args and connect_kw_args. Class specification
should be as follows:
import dbapi20
class mytest(dbapi20.DatabaseAPI20Test):
[...]
Don't 'import DatabaseAPI20Test from dbapi20', or you will
confuse the unit tester - just 'import dbapi20'.
'''
# The self.driver module. This should be the module where the 'connect'
# method is to be found
driver = None
connect_args = () # List of arguments to pass to connect
connect_kw_args = {} # Keyword arguments for connect
table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables
ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix
xddl1 = 'drop table %sbooze' % table_prefix
xddl2 = 'drop table %sbarflys' % table_prefix
lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase
# Some drivers may need to override these helpers, for example adding
# a 'commit' after the execute.
def executeDDL1(self,cursor):
cursor.execute(self.ddl1)
def executeDDL2(self,cursor):
cursor.execute(self.ddl2)
def setUp(self):
''' self.drivers should override this method to perform required setup
if any is necessary, such as creating the database.
'''
pass
def tearDown(self):
''' self.drivers should override this method to perform required cleanup
if any is necessary, such as deleting the test database.
The default drops the tables that may be created.
'''
con = self._connect()
try:
cur = con.cursor()
for ddl in (self.xddl1,self.xddl2):
try:
cur.execute(ddl)
con.commit()
except self.driver.Error:
# Assume table didn't exist. Other tests will check if
# execute is busted.
pass
finally:
con.close()
def _connect(self):
try:
return self.driver.connect(
*self.connect_args,**self.connect_kw_args
)
except AttributeError:
self.fail("No connect method found in self.driver module")
def test_connect(self):
con = self._connect()
con.close()
def test_apilevel(self):
try:
# Must exist
apilevel = self.driver.apilevel
# Must equal 2.0
self.assertEqual(apilevel,'2.0')
except AttributeError:
self.fail("Driver doesn't define apilevel")
def test_threadsafety(self):
try:
# Must exist
threadsafety = self.driver.threadsafety
# Must be a valid value
self.assertTrue(threadsafety in (0,1,2,3))
except AttributeError:
self.fail("Driver doesn't define threadsafety")
def test_paramstyle(self):
try:
# Must exist
paramstyle = self.driver.paramstyle
# Must be a valid value
self.assertTrue(paramstyle in (
'qmark','numeric','named','format','pyformat'
))
except AttributeError:
self.fail("Driver doesn't define paramstyle")
def test_Exceptions(self):
# Make sure required exceptions exist, and are in the
# defined hierarchy.
        if sys.version[0] == '3': #under Python 3 StandardError no longer exists
self.assertTrue(issubclass(self.driver.Warning,Exception))
self.assertTrue(issubclass(self.driver.Error,Exception))
else:
self.assertTrue(issubclass(self.driver.Warning,Exception))
self.assertTrue(issubclass(self.driver.Error,Exception))
self.assertTrue(
issubclass(self.driver.InterfaceError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.DatabaseError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.OperationalError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.IntegrityError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.InternalError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.ProgrammingError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.NotSupportedError,self.driver.Error)
)
def test_ExceptionsAsConnectionAttributes(self):
# OPTIONAL EXTENSION
# Test for the optional DB API 2.0 extension, where the exceptions
# are exposed as attributes on the Connection object
# I figure this optional extension will be implemented by any
# driver author who is using this test suite, so it is enabled
# by default.
con = self._connect()
drv = self.driver
self.assertTrue(con.Warning is drv.Warning)
self.assertTrue(con.Error is drv.Error)
self.assertTrue(con.InterfaceError is drv.InterfaceError)
self.assertTrue(con.DatabaseError is drv.DatabaseError)
self.assertTrue(con.OperationalError is drv.OperationalError)
self.assertTrue(con.IntegrityError is drv.IntegrityError)
self.assertTrue(con.InternalError is drv.InternalError)
self.assertTrue(con.ProgrammingError is drv.ProgrammingError)
self.assertTrue(con.NotSupportedError is drv.NotSupportedError)
def test_commit(self):
con = self._connect()
try:
# Commit must work, even if it doesn't do anything
con.commit()
finally:
con.close()
def test_rollback(self):
con = self._connect()
# If rollback is defined, it should either work or throw
# the documented exception
if hasattr(con,'rollback'):
try:
con.rollback()
except self.driver.NotSupportedError:
pass
def test_cursor(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
def test_cursor_isolation(self):
con = self._connect()
try:
# Make sure cursors created from the same connection have
# the documented transaction isolation level
cur1 = con.cursor()
cur2 = con.cursor()
self.executeDDL1(cur1)
cur1.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
cur2.execute("select name from %sbooze" % self.table_prefix)
booze = cur2.fetchall()
self.assertEqual(len(booze),1)
self.assertEqual(len(booze[0]),1)
self.assertEqual(booze[0][0],'Victoria Bitter')
finally:
con.close()
def test_description(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.description,None,
'cursor.description should be none after executing a '
'statement that can return no rows (such as DDL)'
)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(len(cur.description),1,
'cursor.description describes too many columns'
)
self.assertEqual(len(cur.description[0]),7,
'cursor.description[x] tuples must have 7 elements'
)
self.assertEqual(cur.description[0][0].lower(),'name',
'cursor.description[x][0] must return column name'
)
self.assertEqual(cur.description[0][1],self.driver.STRING,
'cursor.description[x][1] must return column type. Got %r'
% cur.description[0][1]
)
# Make sure self.description gets reset
self.executeDDL2(cur)
self.assertEqual(cur.description,None,
'cursor.description not being set to None when executing '
'no-result statements (eg. DDL)'
)
finally:
con.close()
def test_rowcount(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount should be -1 after executing no-result '
'statements'
)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertTrue(cur.rowcount in (-1,1),
                'cursor.rowcount should == number of rows inserted, or '
'set to -1 after executing an insert statement'
)
cur.execute("select name from %sbooze" % self.table_prefix)
self.assertTrue(cur.rowcount in (-1,1),
'cursor.rowcount should == number of rows returned, or '
'set to -1 after executing a select statement'
)
self.executeDDL2(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount not being reset to -1 after executing '
'no-result statements'
)
finally:
con.close()
lower_func = 'lower'
def test_callproc(self):
con = self._connect()
try:
cur = con.cursor()
if self.lower_func and hasattr(cur,'callproc'):
r = cur.callproc(self.lower_func,('FOO',))
self.assertEqual(len(r),1)
self.assertEqual(r[0],'FOO')
r = cur.fetchall()
self.assertEqual(len(r),1,'callproc produced no result set')
self.assertEqual(len(r[0]),1,
'callproc produced invalid result set'
)
self.assertEqual(r[0][0],'foo',
'callproc produced invalid results'
)
finally:
con.close()
def test_close(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
# cursor.execute should raise an Error if called after connection
# closed
self.assertRaises(self.driver.Error,self.executeDDL1,cur)
# connection.commit should raise an Error if called after connection'
# closed.'
self.assertRaises(self.driver.Error,con.commit)
# connection.close should raise an Error if called more than once
# Issue discussed on DB-SIG: consensus seem that close() should not
# raised if called on closed objects. Issue reported back to Stuart.
# self.assertRaises(self.driver.Error,con.close)
def test_execute(self):
con = self._connect()
try:
cur = con.cursor()
self._paraminsert(cur)
finally:
con.close()
def _paraminsert(self,cur):
self.executeDDL1(cur)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertTrue(cur.rowcount in (-1,1))
if self.driver.paramstyle == 'qmark':
cur.execute(
'insert into %sbooze values (?)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'numeric':
cur.execute(
'insert into %sbooze values (:1)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'named':
cur.execute(
'insert into %sbooze values (:beer)' % self.table_prefix,
{'beer':"Cooper's"}
)
elif self.driver.paramstyle == 'format':
cur.execute(
'insert into %sbooze values (%%s)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'pyformat':
cur.execute(
'insert into %sbooze values (%%(beer)s)' % self.table_prefix,
{'beer':"Cooper's"}
)
else:
self.fail('Invalid paramstyle')
self.assertTrue(cur.rowcount in (-1,1))
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,'cursor.fetchall returned too few rows')
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Cooper's",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
self.assertEqual(beers[1],"Victoria Bitter",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
def test_executemany(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
largs = [ ("Cooper's",) , ("Boag's",) ]
margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ]
if self.driver.paramstyle == 'qmark':
cur.executemany(
'insert into %sbooze values (?)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'numeric':
cur.executemany(
'insert into %sbooze values (:1)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'named':
cur.executemany(
'insert into %sbooze values (:beer)' % self.table_prefix,
margs
)
elif self.driver.paramstyle == 'format':
cur.executemany(
'insert into %sbooze values (%%s)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'pyformat':
cur.executemany(
'insert into %sbooze values (%%(beer)s)' % (
self.table_prefix
),
margs
)
else:
self.fail('Unknown paramstyle')
self.assertTrue(cur.rowcount in (-1,2),
'insert using cursor.executemany set cursor.rowcount to '
'incorrect value %r' % cur.rowcount
)
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,
'cursor.fetchall retrieved incorrect number of rows'
)
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Boag's",'incorrect data retrieved')
self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved')
finally:
con.close()
def test_fetchone(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchone should raise an Error if called before
# executing a select-type query
self.assertRaises(self.driver.Error,cur.fetchone)
# cursor.fetchone should raise an Error if called after
# executing a query that cannot return rows
self.executeDDL1(cur)
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if a query retrieves '
'no rows'
)
self.assertTrue(cur.rowcount in (-1,0))
# cursor.fetchone should raise an Error if called after
# executing a query that cannot return rows
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchone()
self.assertEqual(len(r),1,
'cursor.fetchone should have retrieved a single row'
)
self.assertEqual(r[0],'Victoria Bitter',
'cursor.fetchone retrieved incorrect data'
)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if no more rows available'
)
self.assertTrue(cur.rowcount in (-1,1))
finally:
con.close()
samples = [
'Carlton Cold',
'Carlton Draft',
'Mountain Goat',
'Redback',
'Victoria Bitter',
'XXXX'
]
def _populate(self):
''' Return a list of sql commands to setup the DB for the fetch
tests.
'''
populate = [
"insert into %sbooze values ('%s')" % (self.table_prefix,s)
for s in self.samples
]
return populate
def test_fetchmany(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchmany should raise an Error if called without
#issuing a query
self.assertRaises(self.driver.Error,cur.fetchmany,4)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany()
self.assertEqual(len(r),1,
'cursor.fetchmany retrieved incorrect number of rows, '
'default of arraysize is one.'
)
cur.arraysize=10
r = cur.fetchmany(3) # Should get 3 rows
self.assertEqual(len(r),3,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should get 2 more
self.assertEqual(len(r),2,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should be an empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence after '
'results are exhausted'
)
self.assertTrue(cur.rowcount in (-1,6))
# Same as above, using cursor.arraysize
cur.arraysize=4
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany() # Should get 4 rows
self.assertEqual(len(r),4,
'cursor.arraysize not being honoured by fetchmany'
)
r = cur.fetchmany() # Should get 2 more
self.assertEqual(len(r),2)
r = cur.fetchmany() # Should be an empty sequence
self.assertEqual(len(r),0)
self.assertTrue(cur.rowcount in (-1,6))
cur.arraysize=6
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchmany() # Should get all rows
self.assertTrue(cur.rowcount in (-1,6))
            self.assertEqual(len(rows),6)
rows = [r[0] for r in rows]
rows.sort()
# Make sure we get the right data back out
for i in range(0,6):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved by cursor.fetchmany'
)
rows = cur.fetchmany() # Should return an empty list
self.assertEqual(len(rows),0,
'cursor.fetchmany should return an empty sequence if '
'called after the whole result set has been fetched'
)
self.assertTrue(cur.rowcount in (-1,6))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
r = cur.fetchmany() # Should get empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence if '
'query retrieved no rows'
)
self.assertTrue(cur.rowcount in (-1,0))
finally:
con.close()
def test_fetchall(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchall should raise an Error if called
# without executing a query that may return rows (such
# as a select)
self.assertRaises(self.driver.Error, cur.fetchall)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
# cursor.fetchall should raise an Error if called
            # after executing a statement that cannot return rows
self.assertRaises(self.driver.Error,cur.fetchall)
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchall()
self.assertTrue(cur.rowcount in (-1,len(self.samples)))
self.assertEqual(len(rows),len(self.samples),
'cursor.fetchall did not retrieve all rows'
)
rows = [r[0] for r in rows]
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'cursor.fetchall retrieved incorrect rows'
)
rows = cur.fetchall()
self.assertEqual(
len(rows),0,
'cursor.fetchall should return an empty list if called '
'after the whole result set has been fetched'
)
self.assertTrue(cur.rowcount in (-1,len(self.samples)))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
rows = cur.fetchall()
self.assertTrue(cur.rowcount in (-1,0))
self.assertEqual(len(rows),0,
'cursor.fetchall should return an empty list if '
'a select query returns no rows'
)
finally:
con.close()
def test_mixedfetch(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
rows1 = cur.fetchone()
rows23 = cur.fetchmany(2)
rows4 = cur.fetchone()
rows56 = cur.fetchall()
self.assertTrue(cur.rowcount in (-1,6))
self.assertEqual(len(rows23),2,
'fetchmany returned incorrect number of rows'
)
self.assertEqual(len(rows56),2,
'fetchall returned incorrect number of rows'
)
rows = [rows1[0]]
rows.extend([rows23[0][0],rows23[1][0]])
rows.append(rows4[0])
rows.extend([rows56[0][0],rows56[1][0]])
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved or inserted'
)
finally:
con.close()
def help_nextset_setUp(self,cur):
''' Should create a procedure called deleteme
that returns two result sets, first the
number of rows in booze then "name from booze"
'''
raise NotImplementedError('Helper not implemented')
#sql="""
# create procedure deleteme as
# begin
# select count(*) from booze
# select name from booze
# end
#"""
#cur.execute(sql)
def help_nextset_tearDown(self,cur):
'If cleaning up is needed after nextSetTest'
raise NotImplementedError('Helper not implemented')
#cur.execute("drop procedure deleteme")
def test_nextset(self):
con = self._connect()
try:
cur = con.cursor()
if not hasattr(cur,'nextset'):
return
try:
self.executeDDL1(cur)
sql=self._populate()
for sql in self._populate():
cur.execute(sql)
self.help_nextset_setUp(cur)
cur.callproc('deleteme')
numberofrows=cur.fetchone()
assert numberofrows[0]== len(self.samples)
assert cur.nextset()
names=cur.fetchall()
assert len(names) == len(self.samples)
s=cur.nextset()
assert s == None,'No more return sets, should return None'
finally:
self.help_nextset_tearDown(cur)
finally:
con.close()
def test_nextset(self):
raise NotImplementedError('Drivers need to override this test')
def test_arraysize(self):
# Not much here - rest of the tests for this are in test_fetchmany
con = self._connect()
try:
cur = con.cursor()
self.assertTrue(hasattr(cur,'arraysize'),
'cursor.arraysize must be defined'
)
finally:
con.close()
def test_setinputsizes(self):
con = self._connect()
try:
cur = con.cursor()
cur.setinputsizes( (25,) )
self._paraminsert(cur) # Make sure cursor still works
finally:
con.close()
def test_setoutputsize_basic(self):
# Basic test is to make sure setoutputsize doesn't blow up
con = self._connect()
try:
cur = con.cursor()
cur.setoutputsize(1000)
cur.setoutputsize(2000,0)
self._paraminsert(cur) # Make sure the cursor still works
finally:
con.close()
def test_setoutputsize(self):
# Real test for setoutputsize is driver dependent
        raise NotImplementedError('Drivers need to override this test')
def test_None(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
cur.execute('insert into %sbooze values (NULL)' % self.table_prefix)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchall()
self.assertEqual(len(r),1)
self.assertEqual(len(r[0]),1)
self.assertEqual(r[0][0],None,'NULL value not returned as None')
finally:
con.close()
def test_Date(self):
d1 = self.driver.Date(2002,12,25)
d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(d1),str(d2))
def test_Time(self):
t1 = self.driver.Time(13,45,30)
t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Timestamp(self):
t1 = self.driver.Timestamp(2002,12,25,13,45,30)
t2 = self.driver.TimestampFromTicks(
time.mktime((2002,12,25,13,45,30,0,0,0))
)
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Binary(self):
b = self.driver.Binary(str2bytes('Something'))
b = self.driver.Binary(str2bytes(''))
def test_STRING(self):
self.assertTrue(hasattr(self.driver,'STRING'),
'module.STRING must be defined'
)
def test_BINARY(self):
self.assertTrue(hasattr(self.driver,'BINARY'),
'module.BINARY must be defined.'
)
def test_NUMBER(self):
self.assertTrue(hasattr(self.driver,'NUMBER'),
'module.NUMBER must be defined.'
)
def test_DATETIME(self):
self.assertTrue(hasattr(self.driver,'DATETIME'),
'module.DATETIME must be defined.'
)
def test_ROWID(self):
self.assertTrue(hasattr(self.driver,'ROWID'),
'module.ROWID must be defined.'
)
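# A minimal wiring sketch (an addition, not part of the original suite): running
# this checklist against the stdlib sqlite3 driver.  sqlite3 has no stored
# procedures and omits some optional module attributes (e.g. the STRING/NUMBER
# type objects), so expect a few failures and stubs; treat it as an illustration
# of how a driver subclasses this TestCase, not as a certified conformance run.
if __name__ == '__main__':
    import sqlite3

    class SQLiteAPI20Test(DatabaseAPI20Test):
        driver = sqlite3
        connect_args = (':memory:',)
        connect_kw_args = {}
        lower_func = None  # sqlite3 exposes no stored procedures

        def test_nextset(self):
            pass  # nextset is not supported by sqlite3

        def test_setoutputsize(self):
            pass  # setoutputsize is a no-op for sqlite3

    unittest.main()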
|
the-stack_0_8711 | # Copyright (c) Open-MMLab. All rights reserved.
from .colorspace import (bgr2gray, bgr2hls, bgr2hsv, bgr2rgb, bgr2ycbcr,
gray2bgr, gray2rgb, hls2bgr, hsv2bgr, imconvert,
rgb2bgr, rgb2gray, rgb2ycbcr, ycbcr2bgr, ycbcr2rgb)
from .geometric import (cutout, imcrop, imflip, imflip_, impad,
impad_to_multiple, imrescale, imresize, imresize_like,
imresize_to_multiple, imrotate, imshear, imtranslate,
rescale_size)
from .io import imfrombytes, imread, imwrite, supported_backends, use_backend
from .misc import tensor2imgs
from .photometric import (adjust_brightness, adjust_color, adjust_contrast,
adjust_lighting, adjust_sharpness, auto_contrast,
clahe, imdenormalize, imequalize, iminvert,
imnormalize, imnormalize_, lut_transform, posterize,
solarize)
__all__ = [
'bgr2gray', 'bgr2hls', 'bgr2hsv', 'bgr2rgb', 'gray2bgr', 'gray2rgb',
'hls2bgr', 'hsv2bgr', 'imconvert', 'rgb2bgr', 'rgb2gray', 'imrescale',
'imresize', 'imresize_like', 'imresize_to_multiple', 'rescale_size',
'imcrop', 'imflip', 'imflip_', 'impad', 'impad_to_multiple', 'imrotate',
'imfrombytes', 'imread', 'imwrite', 'supported_backends', 'use_backend',
'imdenormalize', 'imnormalize', 'imnormalize_', 'iminvert', 'posterize',
'solarize', 'rgb2ycbcr', 'bgr2ycbcr', 'ycbcr2rgb', 'ycbcr2bgr',
'tensor2imgs', 'imshear', 'imtranslate', 'adjust_color', 'imequalize',
'adjust_brightness', 'adjust_contrast', 'lut_transform', 'clahe',
'adjust_sharpness', 'auto_contrast', 'cutout', 'adjust_lighting'
]
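# A short usage sketch (an addition, not executed on import): it chains a few of
# the helpers re-exported above.  The file names are placeholders and the calls
# assume an OpenCV-backed imread/imwrite; adjust the paths before trying it.
def _demo_image_pipeline(src='demo.jpg', dst='out.jpg'):
    img = imread(src)  # BGR ndarray
    small = imresize(img, (256, 256))  # size is (width, height)
    flipped = imflip(small, direction='horizontal')
    gray = bgr2gray(flipped)  # single-channel result
    imwrite(gray, dst)
    return gray.shape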
|
the-stack_0_8712 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#author:fugui
from typing import Counter, Text
import urllib.request
import ssl
import json
import os
import sys
import datetime
#11:00 cut-off used to decide whether the ServerChan push is sent
global d_time0,d_time1,d_time2,n_time
d_time0 = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '11:00', '%Y-%m-%d%H:%M')
n_time = datetime.datetime.now()
#The big-value grab windows run between d_time3–d_time4 and d_time5–d_time6. d_time4 stops 11 minutes early because the afternoon pool can close 10 minutes ahead of schedule and, in some regions, 10-yuan-plus packets are still left at the very end, which would make the script error out;
# at the last moment the script gives up monitoring and falls back to the face value of whatever guaranteed-win voucher it holds
###By default the big-value (15 yuan and above) windows are 17:00-20:49 in the afternoon and 21:00-23:59 at night; changing them is not recommended
##The noon run below also tries for a big-value packet, provided the prop inventory holds a guaranteed-win voucher worth 10 yuan or more!!!!!!!!!
global d_time3,d_time4,d_time5,d_time6
d_time3 = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '17:00', '%Y-%m-%d%H:%M')
d_time4 = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '20:49', '%Y-%m-%d%H:%M')
d_time5 = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '21:00', '%Y-%m-%d%H:%M')
d_time6 = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '23:59', '%Y-%m-%d%H:%M')
#d_time7 sets the time before which guaranteed-win vouchers are NOT used. If it is set to 17:00, draws before 17:00 will not spend a voucher; this takes priority over the custom big-value windows so the valid vouchers in the prop inventory are saved
##Setting d_time7 to 11:00 removes the restriction entirely. Do not delete d_time7; if no limit is wanted, just change its time to 11:00 (note the ASCII colon)
global d_time7
d_time7 = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '11:00', '%Y-%m-%d%H:%M')
### How many red-packet beans to accumulate before the voucher-exchange step runs, so the script does not keep trading for the 5-yuan-off voucher
setexchangedou = int(sys.argv[12])
#Disable SSL verification, handy when debugging requests through a capture proxy
ssl._create_default_https_context = ssl._create_unverified_context
#Constants that do not change in the short term (six months or more)
parActivityId="Gh1tkq-wvFU2xEP_ZPzHPQ"
wm_ctype="mtandroid"
#The meaning of portraitId below is unknown; it is used by the request that grants 30 beans after browsing the coupon page for 30s each day
portraitId=498
#Minimal shared request headers
head={"Host": "i.waimai.meituan.com","User-Agent":"MeituanGroup/11.9.208","x-requested-with": "XMLHttpRequest","content-type":"application/x-www-form-urlencoded"}
#Meituan Waimai API server address
baseurl=r"https://i.waimai.meituan.com"
#pushPlus webhook address for WeCom (enterprise WeChat) and similar channels; empty by default, fill it in manually if you use WeCom
global webhook
webhook = sys.argv[1]
#Global variables and their initial values; do NOT change the ones initialised below!!!!
global propIdforuse,token,batchId,propId
showPriceNumber = "1"
propIdforuse =2
batchId = "haha"
wm_latitude =sys.argv[2]
wm_longitude=sys.argv[3]
token =sys.argv[4]
propId=sys.argv[5]
exchangeCoinNumber=sys.argv[6]
serverkey=sys.argv[7]
pushPlusToken =sys.argv[8]
yesornot = sys.argv[9]
yesornot2 = sys.argv[10]
leftdou=0
counttime = 0
cwd = os.path.dirname(os.path.realpath(__file__))
##############################################################################
##Mark these denominations as still in stock; with a 10-yuan-plus guaranteed-win voucher the script polls the pool for a big-value packet. If you do not need this feature, pass 0 for the value on the next line (sys.argv[11])
eight = ten = fifteen = thirty =fifty=sys.argv[11]
##############################################################################
# eight_left= 10
################################################################################
#If, during your custom big-value window, you cannot grab any packet with a 10-yuan-plus voucher, increase the values below, e.g. to around 10
ten_left=0
fifteen_left=0
thirty_left=0
fifty_left=0
#Tee everything printed to output.txt as well
class Logger(object):
def __init__(self, fileN='Default.log'):
self.terminal = sys.stdout
self.log = open(fileN, 'w+',encoding='utf-8')
def write(self, message):
'''print实际相当于sys.stdout.write'''
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
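# Hypothetical helper (not in the original script, never called): shows how the
# Logger class above tees print() output into a file. main() performs the same
# redirection inline with output.txt; the path here is only a placeholder.
def _demo_logger(path='demo_output.txt'):
    previous = sys.stdout
    sys.stdout = Logger(path)  # every print() now also lands in the file
    print('tee demo line')
    sys.stdout = previous  # restore the real stdout when done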
###获取serverkey
# def getserverkey():
# global yesornot
# global serverkey
# if os.path.exists(str(cwd)+r"/serverkey.txt"):
# # file1 = open(r"./token.txt", mode='r',encoding="UTF-8")
# # token = file1.readline()
# # file1.close
# if os.path.getsize(str(cwd)+r"/serverkey.txt")!=0:
# yesornot = "y"
# else:
# yesornot = "n"
# return -1
# else:
# while True:
# try:
# print("请选择是否开启server酱推送!\n")
# yesornot=input("是否开启server酱推送(y/n):\n")
# if type(yesornot)==str and yesornot=='y':
# print("获取serverkey请访问:https://sct.ftqq.com/\n")
# serverkey=input("请输入serverkey:\n")
# except:
# pass
# if type(yesornot)==str and (yesornot =="n" or yesornot=='y'):
# break
# file =open(str(cwd)+r"/serverkey.txt", mode='w+',encoding="UTF-8")
# file.write(serverkey)
# file.close
###获取pushPlusToken
# def getpushPlusToken():
# global yesornot2
# global pushPlusToken
# if os.path.exists(str(cwd)+r"/pushPlusToken.txt"):
# # file1 = open(r"./token.txt", mode='r',encoding="UTF-8")
# # token = file1.readline()
# # file1.close
# if os.path.getsize(str(cwd)+r"/pushPlusToken.txt")!=0:
# yesornot2 = "y"
# else:
# yesornot2 = "n"
# return -1
# else:
# while True:
# try:
# print("请选择是否开启pushPlus推送\n")
# yesornot2=input("是否开启pushPlus推送(y/n):\n")
# if type(yesornot2)==str and yesornot2=='y':
# print("获取pushPlusToken请访问:https://www.pushplus.plus/\n")
# pushPlusToken=input("请输入pushPlusToken:\n")
# except:
# pass
# if type(yesornot2)==str and (yesornot2 =="n" or yesornot2=='y'):
# break
# file =open(str(cwd)+r"/pushPlusToken.txt", mode='w+',encoding="UTF-8")
# file.write(pushPlusToken)
# file.close
#获取token
# def gettoken():
# if os.path.exists(str(cwd)+r"/token.txt"):
# file1 = open(str(cwd)+r"/token.txt", mode='r',encoding="UTF-8")
# token = file1.readline()
# file1.close
# return token
# else:
# while True:
# try:
# print("获取token方法参考readme.md!\n")
# token=input("请输入token:\n")
# except:
# pass
# if type(token)==str and token !="":
# break
# file =open(str(cwd)+r"/token.txt", mode='w+',encoding="UTF-8")
# file.write(token)
# file.close
# return token
#获取经纬度函数并存入当前目录文本(美团活动为随机地点固定半年以上,各地大额红包概率可能不同,若长期小额,可尝试换地址或换号)
# def getlatlongitude():
# if os.path.exists(str(cwd)+r"/wm_latitudewm_longitude.txt"):
# return -1
# else:
# while True:
# try:
# print("若您不知道🙏限时抢红包开放城市,可试各地省会,如成都(30657401,104065827)\n")
# wm_latitude=eval(input("请输入去除小数点的纬度(如30657401):\n"))
# wm_longitude=eval(input("请输入去除小数点的经度(如104065827):\n"))
# except:
# pass
# if type(wm_latitude)==int and type(wm_longitude)==int :
# break
# file =open(str(cwd)+r"/wm_latitudewm_longitude.txt", mode='w+',encoding="UTF-8")
# file.write(str(wm_latitude)+"\n"+str(wm_longitude))
# file.close
#定义一个云端查询必中符库中所有的propId 和needNumber 的函数,并传给getpropId_Coninnumber()函数作为用户输入参考提示
# def myredbean():
# wm_latitude = 1
# wm_longitude = 1
# print("开始执行从美团接口查询propid 和 needNumber参数脚本:\n")
# datas = "parActivityId="+parActivityId+"&wm_latitude="+str(wm_latitude)+"&wm_longitude="+str(wm_longitude)+"&token="+str(token)+"&userPortraitId="+str(portraitId)
# url_drawlottery = baseurl+r"/cfeplay/playcenter/batchgrabred/myRedBean"
# request =urllib.request.Request(url_drawlottery,headers=head,data=datas.encode("utf-8"),method="POST")
# try:
# response = urllib.request.urlopen(request,timeout=10)
# result = response.read().decode("utf-8")
# result2 = json.loads(result)
# cent = 1
# if(result2["code"]==0 and result2["subcode"]==0 and len(result2["data"]["propExchangeRuleInfos"])):
# for k in result2["data"]["propExchangeRuleInfos"]:
# print("第%d类必中符 所需设置propId参数为%d\t所需红包豆数量为:%d\t总量为%d\n"%(cent,k["propId"],k["needNumber"],k["amount"]))
# cent=cent+1
# print("一般这几类必中符金额依次为5元 8元 15元,大概率使用后兑换到20-5,25-8,40-15的红包,建议选择面值最大的一类,即propId填5,所需豆子数量填1800即可\n脚本会自动从设定的面值去尝试兑换,逐级尝试面值,直到兑换成功,所以推荐设置默认兑换15面值的必中符\n注意填写的propId和所需豆子数之间是上方的一一对应关系,错误对应将导致兑换失败!\n")
# elif (result2["code"]==1 and result2["subcode"]==-1):
# print("%s,原因:输入token失效或错误 请继续运行程序并输入,脚本将在运行一遍后自动删除异常配置文件!!\n"%(result2["msg"]))
# else:
# print("请求接口失效或参数异常,建议🙏重置参数!\n")
# sys.exit(0)
# except urllib.error.URLError as e:
# if hasattr(e,"code"):
# print("脚本执行失败,错误代码如下:\n")
# print(e.code)
# if hasattr(e,"reason"):
# print(e,"reason")
#定义获得需要兑换的必中符道具类型和兑换所需的豆子
# def getpropId_Coinnumber(token):
# if os.path.exists(str(cwd)+r"/propId_Coinnumbe.txt"):
# return -1
# else:
# while True:
# myredbean(token)
# try:
# propId=eval(input("请输入所需要兑换道具的propId(推荐填写5):\n"))
# exchangeCoinNumber=eval(input("请输入propId对应某类必中符所需的豆子数量(推荐填写1800):\n"))
# except:
# pass
# if type(propId)==int and type(exchangeCoinNumber)==int :
# if propId == 2 or propId == 4 or propId == 5:
# if exchangeCoinNumber ==500 or exchangeCoinNumber ==1000 or exchangeCoinNumber ==1800 :
# break
# file =open(str(cwd)+r"/propId_Coinnumbe.txt", mode='w+',encoding="UTF-8")
# file.write(str(propId)+"\n"+str(exchangeCoinNumber))
# file.close
#定义从文本文件中获取存入变量的函数,第二次运行时不用输入,若需改变经纬度和token,则直接删除文件即可
# def getVar():
# if not os.path.exists(str(cwd)+r"/wm_latitudewm_longitude.txt"):
# print("程序运行中配置文件异常,文件或者权限异常,已自动为您删除脚本目录下所有已生成的txt文档并停止程序!\n")
# os.remove(str(cwd)+r"/wm_latitudewm_longitude.txt")
# os.remove(str(cwd)+r"/token.txt")
# os.remove(str(cwd)+r"/propId_Coinnumbe.txt")
# os.remove(str(cwd)+r"/serverkey.txt")
# os.remove(str(cwd)+r"/pushPlusToken.txt")
# sys.exit(0)
# file1 = open(str(cwd)+r"/wm_latitudewm_longitude.txt", mode='r',encoding="UTF-8")
# wm_latitude = int(file1.readline())
# wm_longitude = int(file1.readline())
# file1.close()
# file2 = open(str(cwd)+r"/token.txt", mode='r',encoding="UTF-8")
# if not os.path.exists(str(cwd)+r"/token.txt"):
# print("程序运行中配置文件异常,文件或者权限异常,已自动为您删除脚本目录下所有已生成的txt文档并停止程序!\n")
# os.remove(str(cwd)+r"/wm_latitudewm_longitude.txt")
# os.remove(str(cwd)+r"/token.txt")
# os.remove(str(cwd)+r"/propId_Coinnumbe.txt")
# os.remove(str(cwd)+r"/serverkey.txt")
# os.remove(str(cwd)+r"/pushPlusToken.txt")
# sys.exit(0)
# token = file2.readline()
# file2.close()
# if not os.path.exists(str(cwd)+r"/propId_Coinnumbe.txt"):
# print("程序运行中配置文件异常,文件或者权限异常,已自动为您删除脚本目录下所有已生成的txt文档并停止程序!\n")
# os.remove(str(cwd)+r"/wm_latitudewm_longitude.txt")
# os.remove(str(cwd)+r"/token.txt")
# os.remove(str(cwd)+r"/propId_Coinnumbe.txt")
# os.remove(str(cwd)+r"/serverkey.txt")
# os.remove(str(cwd)+r"/pushPlusToken.txt")
# sys.exit(0)
# file3 = open(str(cwd)+r"/propId_Coinnumbe.txt", mode='r',encoding="UTF-8")
# propId = int(file3.readline())
# exchangeCoinNumber = int(file3.readline())
# file3.close()
# return wm_latitude,wm_longitude,token,propId,exchangeCoinNumber
##获得pushPlusToken
# def pushPlusTokenvar():
# global pushPlusToken
# if not os.path.exists(str(cwd)+r"/pushPlusToken.txt"):
# print("程序运行中配置文件异常,文件或者权限异常,已自动为您删除脚本目录下所有已生成的txt文档并停止程序!\n")
# os.remove(str(cwd)+r"/wm_latitudewm_longitude.txt")
# os.remove(str(cwd)+r"/token.txt")
# os.remove(str(cwd)+r"/propId_Coinnumbe.txt")
# os.remove(str(cwd)+r"/serverkey.txt")
# os.remove(str(cwd)+r"/pushPlusToken.txt")
# sys.exit(0)
# file = open(str(cwd)+r"/pushPlusToken.txt", mode='r',encoding="UTF-8")
# pushPlusToken = file.readline()
# file.close()
# return pushPlusToken
##获得serverkey
# def serverkeyvar():
# global serverkey
# if not os.path.exists(str(cwd)+r"/serverkey.txt"):
# print("程序运行中配置文件异常,文件或者权限异常,已自动为您删除脚本目录下所有已生成的txt文档并停止程序!\n")
# os.remove(str(cwd)+r"/wm_latitudewm_longitude.txt")
# os.remove(str(cwd)+r"/token.txt")
# os.remove(str(cwd)+r"/propId_Coinnumbe.txt")
# os.remove(str(cwd)+r"/serverkey.txt")
# os.remove(str(cwd)+r"/pushPlusToken.txt")
# sys.exit(0)
# file = open(str(cwd)+r"/serverkey.txt", mode='r',encoding="UTF-8")
# serverkey = file.readline()
# file.close()
# return serverkey
#定义获取batchId的函数
def getbatchId():
global wm_latitude,wm_longitude
# wm_latitude = $wm_latitude
# wm_longitude=$wm_longitude
print("**开始执行获取batchId脚本:**\n")
datas = "parActivityId="+parActivityId+"&wm_ctype="+wm_ctype+"&wm_latitude="+str(wm_latitude)+"&wm_longitude="+str(wm_longitude)+"&token="+token
url_getbatchId = baseurl+r"/cfeplay/playcenter/batchgrabred/corepage"
request =urllib.request.Request(url_getbatchId,headers=head,data=datas.encode("utf-8"),method="POST")
try:
response = urllib.request.urlopen(request,timeout=10)
result = response.read().decode("utf-8")
result2 = json.loads(result)
# print(result2)
# print(result2["code"])
if(result2["code"]==0):
if "batchId" in result2["data"]:
print("batchId:%s\n"%(result2["data"]["batchId"]))
return result2["data"]["batchId"]
else:
print("获取batchId失败👀,当前非限时抢红包时间段,无法进行下一步,但已为您签到完毕🙏!\n")
os._exit(1)
elif (result2["code"]==1):
print("%s,接口需提交的token参数已改变👀,请重新运行一遍脚本!\n"%(result2["msg"]))
# os.remove(str(cwd)+r"/wm_latitudewm_longitude.txt")
# os.remove(str(cwd)+r"/token.txt")
# os.remove(str(cwd)+r"/propId_Coinnumbe.txt")
# os.remove(str(cwd)+r"/serverkey.txt")
# sys.exit(0)
os._exit(1)
else:
print("获取batchId错误👀,请检查网络,否则为接口失效!\n")
os._exit(1)
except urllib.error.URLError as e:
if hasattr(e,"code"):
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
#定义每天七次签到领豆的函数,需传入获取的token
def signForBeans():
global token
print("**开始执行签到领豆脚本:** \n")
datas = "token="+token
url_signforbeans = baseurl+r"/cfeplay/playcenter/batchgrabred/drawPoints/v2"
request =urllib.request.Request(url_signforbeans,headers=head,data=datas.encode("utf-8"),method="POST")
try:
response = urllib.request.urlopen(request)
result = response.read().decode("utf-8")
result2 = json.loads(result)
# print(result2)
# print(result2["code"])
if(result2["code"]==0):
print("👴%s\n"%(result2["msg"]))
elif (result2["code"]==1):
print("👴未到领取时间或已经领取完了(每天可领7次,每次间隔需半小时\n)!")
elif (result2["code"]==7):
print("token已失效,请检查是否已自动删除所有配置文件,若未自动删除,请手动🙏删除所有配置文件并重新运行脚本,最后温馨提示:建议接入server酱通知!\n")
else:
print("请求接口失效或网络不佳,请稍后再试!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败👀,错误代码如下:\n")
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
#def 限时抢红包函数
def drawlottery(batchId):
global wm_latitude,wm_longitude,token
# wm_latitude = $wm_latitude
# wm_longitude=$wm_longitude
print("**开始执行限时抢天天神券脚本🧧:**\n")
print(batchId)
datas = "parActivityId="+parActivityId+"&wm_latitude="+wm_latitude+"&wm_longitude="+wm_longitude+"&token="+token+"&batchId="+batchId+"&isShareLink=true"+"&propType=1"+"&propId="+str(propIdforuse)
url_drawlottery = baseurl+r"/cfeplay/playcenter/batchgrabred/drawlottery"
request =urllib.request.Request(url_drawlottery,headers=head,data=datas.encode("utf-8"),method="POST")
try:
response = urllib.request.urlopen(request,timeout=10)
result = response.read().decode("utf-8")
result2 = json.loads(result)
# print(result2)
# print(result2["code"])
if(result2["code"]==0):
print("领取成功!\n提示信息:%s\n红包属性:%s\n使用限制:%s\n红包价值:%s\n红包立即生效时间:%s\n红包剩余有效期:%s分钟\n"%(result2["msg"],result2["data"]["name"],result2["data"]["priceLimitdesc"],result2["data"]["showTitle"],result2["data"]["endTimeDesc"],str(float(result2["data"]["leftTime"])/60000)))
global showPriceNumber
showPriceNumber = result2["data"]["showPriceNumber"]
if int(showPriceNumber)<500:
print("**当前红包面值为%d元,小于5元,👴将自动执行小额红包转红包豆脚本!!**\n"%(int(showPriceNumber)/100))
else:
print("**当前红包面值为%d元,大于等于5元,👴将不会执行小额红包转红包豆脚本!!**\n"%(int(showPriceNumber)/100))
elif (result2["code"]==1 and result2["subcode"]==3):
print("%s😅\n"%(result2["msg"]))
elif(result2["code"]==1 and result2["subcode"]==-1):
print("token错误或已失效,%s\n"%(result2["msg"]))
elif (result2["code"]==7):
print("token已失效,请手动🙏删除所有自动生成的配置文件,并建议接入server酱通知!\n")
else:
print("请求接口失效或参数异常,请稍后再试!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败,错误代码如下:\n")
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
#定义接受红包函数,获得红包小于5元时,不执行此函数,并调用redtobean函数自动将红包转为红包豆,若两个函数都不执行,在抢红包成功5分钟左右红包会自动发放到账户
def acceptRed(batchId):
# wm_latitude = $wm_latitude
# wm_longitude=$wm_longitude
global wm_latitude,wm_longitude,token
print("**开始执行发放天天神券🧧到红包库脚本:**\n")
datas = "parActivityId="+parActivityId+"&wm_latitude="+str(wm_latitude)+"&wm_longitude="+str(wm_longitude)+"&token="+token+"&batchId="+batchId
url_acceptRed = baseurl+r"/cfeplay/playcenter/batchgrabred/acceptRed"
request =urllib.request.Request(url_acceptRed,headers=head,data=datas.encode("utf-8"),method="POST")
try:
response = urllib.request.urlopen(request,timeout=10)
result = response.read().decode("utf-8")
result2 = json.loads(result)
# print(result2)
# print(result2["code"])
if(result2["code"]==0):
print("*👴抢到的红包已经领取成功啦,快去使用吧!*\n")
elif (result2["code"]==1):
print("%s\n"%(result2["msg"]))
elif (result2["code"]==7):
print("token已失效,请手动🙏删除所有自动生成的配置文件,并建议接入server酱通知!\n")
else:
print("请求接口失效或参数异常,请稍后再试!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败👀,错误代码如下:\n")
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
#定义红包转红包豆函数,将小于5元的红包转为红包豆
def redtobean(batchId):
# wm_latitude = $wm_latitude
# wm_longitude=$wm_longitude
global wm_latitude,wm_longitude
print("**默认尝试执行面值小于5元🧧自动转红包豆脚本:**\n")
datas = "parActivityId="+parActivityId+"&wm_latitude="+str(wm_latitude)+"&wm_longitude="+str(wm_longitude)+"&token="+token+"&batchId="+batchId
url_drawlottery = baseurl+r"/cfeplay/playcenter/batchgrabred/redToBean"
request =urllib.request.Request(url_drawlottery,headers=head,data=datas.encode("utf-8"),method="POST")
try:
response = urllib.request.urlopen(request,timeout=10)
result = response.read().decode("utf-8")
result2 = json.loads(result)
# print(result2)
# print(result2["code"])
if(result2["code"]==0):
print("👴小额红包转红包豆成功!\n")
elif (result2["code"]==1 and result2["subcode"]==12):
# print("%s😅\n"%(result2["msg"]))
print("没有待转换的红包😅\n")
elif (result2["code"]==7):
print("token已失效,请手动🙏删除所有自动生成的配置文件,并建议接入server酱通知!\n")
else:
print("请求接口失效或参数异常,请稍后再试!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败,错误代码如下:\n")
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
#查询已领取到的天天神券
def querymyreward():
global token
print("**开始执行查询已领天天神券🧧脚本:**\n")
datas = "parActivityId="+parActivityId+"&token="+token
url_querymyreward = baseurl+r"/cfeplay/playcenter/batchgrabred/myreward"
request =urllib.request.Request(url_querymyreward,headers=head,data=datas.encode("utf-8"),method="POST")
try:
response = urllib.request.urlopen(request,timeout=10)
result = response.read().decode("utf-8")
result2 = json.loads(result)
# print(result2)
# print(result2["code"])
if(result2["code"]==0 and len(result2["data"]["myawardInfos"])):
print("👴开始遍历红包库:\n")
print("红包库详细信息:\n")
print("红包库中共有%d个红包\n"%(len(result2["data"]["myawardInfos"])))
cent=0
count = 0
isover15=0
for k in result2["data"]["myawardInfos"]:
if not k["status"]:
print("**第%d个红包有效!!!!**\n红包属性:%s\n使用限制:%s\n红包价值:%s元\n红包剩余有效期%s分钟\n"%(cent+1,k["name"],k["priceLimitdesc"],k["showPriceNumberYuan"],str(float(k["leftTime"])/60000)))
if(int(k["showPriceNumberYuan"])>15):
isover15 =1
print("\n")
else:
count=count+1
if cent == 0:
print("**过期红包详情:**\n")
cent=cent+1
if(propIdforuse!=5):
print("总计已领取%d个红包,其中已过期%d个😅,有效%d个\n"%(cent,count,cent-count))
else:
if isover15==1:
print("恭喜你领取大额限时红包,具体价值如上所示!!总计已领取%d个红包,其中已过期%d个😅,有效%d个\n"%(cent,count,cent-count))
print("\n")
elif (result2["code"]==1):
print("%s\n"%(result2["msg"]))
elif (result2["code"]==7):
print("token已失效,请手动🙏删除所有自动生成的配置文件,并建议接入server酱通知!\n")
else:
print("请求接口失效或参数异常,请稍后再试!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败👀,错误代码如下:\n")
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
#获取每日浏览天天神券奖励的30豆
def sendTaskRedBean():
global wm_latitude,wm_longitude,token
# wm_latitude = $wm_latitude
# wm_longitude=$wm_longitude
print("**开始执行领取每日30豆的脚本:**\n")
datas = "parActivityId="+parActivityId+"&wm_latitude="+str(wm_latitude)+"&wm_longitude="+str(wm_longitude)+"&token="+token+"&portraitId="+str(portraitId)
url_sendTaskRedBean = baseurl+r"/cfeplay/playcenter/batchgrabred/sendTaskRedBean"
request =urllib.request.Request(url_sendTaskRedBean,headers=head,data=datas.encode("utf-8"),method="POST")
try:
response = urllib.request.urlopen(request,timeout=10)
result = response.read().decode("utf-8")
result2 = json.loads(result)
if(result2["status"]==0):
print("%s\n今天领取成功%d个红包豆,请明日再来!\n"%(result2["msg"],result2["sendBeanCount"]))
elif (result2["status"]==1):
print("您今日已领取过😅,%s\n"%(result2["msg"]))
elif (result2["status"]==-1):
print("portraitId已失效,%s\n"%(result2["msg"]))
else:
print("请求接口失效或参数异常,请稍后再试!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败👀,错误代码如下:\n")
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
#定义每日签到得必中符函数
def doAction():
global wm_latitude,wm_longitude,token
# wm_latitude = $wm_latitude
# wm_longitude=$wm_longitude
print("**开始执行每日签到领必中符🧧的脚本:**\n")
datas = "parActivityId="+parActivityId+"&wm_latitude="+str(wm_latitude)+"&wm_longitude="+str(wm_longitude)+"&token="+token+"&action=SiginInGetProp"
url_doaction = baseurl+r"/cfeplay/playcenter/batchgrabred/doAction"
request =urllib.request.Request(url_doaction,headers=head,data=datas.encode("utf-8"),method="POST")
try:
response = urllib.request.urlopen(request,timeout=10)
result = response.read().decode("utf-8")
result2 = json.loads(result)
if(result2["code"]==0 and result2["data"]["signDays"]!=0):
print("签到%s\n,截止今日这周已签到%d天"%(result2["msg"],result2["data"]["signDays"]))
elif (result2["code"]==0 and result2["data"]["signDays"]==0):
print("您今日已签到,请明天再来!")
elif (result2["code"]==7):
print("参数异常或接口已失效")
else:
print("请求接口失效或参数异常,请稍后再试!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败👀,错误代码如下:\n")
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
#查看道具库中的必中符记录
def querymyProps():
global propIdforuse
global wm_latitude,wm_longitude,token
# wm_latitude = $wm_latitude
# wm_longitude=$wm_longitude
print("**开始执行查询道具库中必中符🧧详情的脚本:**\n")
datas = "parActivityId="+parActivityId+"&wm_latitude="+str(wm_latitude)+"&wm_longitude="+str(wm_longitude)+"&token="+token
url_querymyprops = baseurl+r"/cfeplay/playcenter/batchgrabred/myProps"
request =urllib.request.Request(url_querymyprops,headers=head,data=datas.encode("utf-8"),method="POST")
try:
response = urllib.request.urlopen(request,timeout=10)
result = response.read().decode("utf-8")
result2 = json.loads(result)
if(result2["code"]==0 and len(result2["data"])):
print("👴开始遍历道具库:\n")
print("道具库详细信息:\n")
print("红包库中共有%d个必中符道具\n"%(len(result2["data"])))
cent=0
count = 0
for k in result2["data"]:
if k["status"]==1:
print("第%d个必中符道具有效!!!!\n必中符道具id号:%s\n必中符道具属性:%s\n过期时间:%s\n"%(cent+1,k["recordNo"],k["propName"],k["expireTime"]))
if cent==0:
propIdforuse = k["propId"]
print("\n")
else:
count=count+1
cent=cent+1
if (count!=0):
print("总计%d个必中符道具,已过期%d个😅,有效%d个\n"%(cent,count,cent-count))
if ((cent-count)!=0):
print("**注意:每天中午抢红包🧧时将自动为您使用道具库中第一个道具!!** ")
else:
print(" **注意:道具库无有效道具,无法使用必中符,下次抢红包将使用默认参数抢红包(拼手气😅)!!** ")
print("\n")
elif (result2["code"]==7):
print("参数异常或接口已失效,请手动🙏删除所有自动生成的配置文件,并建议接入server酱通知!")
else:
print("必中符道具库为空,👴未帮您领取过道具!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败👀,错误代码如下:\n")
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
#已废弃,直接发送兑换请求即可,不在兑换时间段 subcode 为13
#定义运行时是否能兑换豆子成必中符,目前一直为14点至16点,故不定义此函数,采取每天14点至16点运行此程序时直接尝试兑换
#若需自行获取当前时间段是否可换豆子为道具,则post以下请求即可
# POST /cfeplay/playcenter/batchgrabred/canExchangeCheck HTTP/1.1
# Host: i.waimai.meituan.com
# Content-Length: 82
# User-Agent:MeituanGroup/11.9.208
# x-requested-with: XMLHttpRequest
# content-type: application/x-www-form-urlencoded
# parActivityId=Gh1tkq-wvFU2xEP_ZPzHPQ&wm_latitude=30657401&wm_longitude=104065827
#定义豆子兑换成必中符函数:
def exchange():
global propId,wm_latitude,wm_longitude,token
# wm_latitude = getVar()[0]
# wm_longitude = getVar()[1]
wm_actual_latitude = str(wm_latitude)
wm_actual_longitude =str(wm_longitude)
print("**开始执行每日豆子兑换必中符脚本**:\n")
while(1):
datas = "wm_actual_longitude="+wm_actual_longitude+"&wm_actual_latitude="+wm_actual_latitude+"&exchangeRuleId=&propId="+str(propId)+"&exchangeCoinNumber="+str(exchangeCoinNumber)+"&parActivityId="+parActivityId+"&wm_ctype="+wm_ctype+"&wm_latitude="+str(wm_latitude)+"&wm_longitude="+str(wm_longitude)+"&token="+token
url_exchange = baseurl+r"/cfeplay/playcenter/batchgrabred/exchange"
request =urllib.request.Request(url_exchange,headers=head,data=datas.encode("utf-8"),method="POST")
try:
response = urllib.request.urlopen(request,timeout=10)
result = response.read().decode("utf-8")
result2 = json.loads(result)
if(result2["code"]==0 and result2["subcode"]==0):
print("%s,您设置的红包豆兑换指定额度的必中符成功!!!请查看下方道具库详情!😄\n"%(result2["msg"]))
break
elif (result2["code"]==1 and result2["subcode"]==13):
print("%s\n"%(result2["msg"]))
break
elif (result2["code"]==1 and result2["subcode"]==-1):
print("%s,您现在的红包豆不足以兑换此类必中符或者此类必中符已被抢完!\n正尝试兑换*次一等级*必中符\n"%(result2["msg"]))
if(propId ==5):
propId =4
break
elif (result2["code"]==7):
print("参数异常或接口已失效\n")
else:
print("请求接口失效或参数异常,请稍后再试!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败👀,准备退出程序,错误代码为:%s\n"%(e.code))
if hasattr(e,"reason"):
print("脚本执行失败👀,准备退出程序,错误代码为:%s\n"%(e.reason))
###定义查询豆子详情的函数
def myRedBeanRecords():
global wm_latitude,wm_longitude,leftdou,token
# wm_latitude = $wm_latitude
# wm_longitude=$wm_longitude
print("**开始执行查询豆子变化详情参数脚本**:\n")
datas = "parActivityId="+parActivityId+"&wm_latitude="+str(wm_latitude)+"&wm_longitude="+str(wm_longitude)+"&token="+str(token)+"&userPortraitId="+str(portraitId)+"&pageNum=1"
url_myredbeanRecords = baseurl+r"/cfeplay/playcenter/batchgrabred/myRedBeanRecords"
request =urllib.request.Request(url_myredbeanRecords,headers=head,data=datas.encode("utf-8"),method="POST")
try:
response = urllib.request.urlopen(request,timeout=10)
result = response.read().decode("utf-8")
result2 = json.loads(result)
cent=1
if(result2["code"]==0 and result2["subcode"]==0 and len(result2["data"]["redBeanRecordInfos"])):
leftdou= result2["data"]["totalObtainAmount"]-result2["data"]["usedAmount"]-result2["data"]["expiredAmount"]
print("**总获得红包豆:%d,已使用红包豆:%d,已过期红包豆:%d,剩余可用红包豆:%d**\n"%(result2["data"]["totalObtainAmount"],result2["data"]["usedAmount"],result2["data"]["expiredAmount"],leftdou))
for k in result2["data"]["redBeanRecordInfos"]:
print("exchangeTime:%s\texchangeMessage:%s\texchangeNumber:%s\n"%(k["exchangeTime"],k["exchangeMessage"],k["exchangeNumber"]))
cent=cent+1
if(cent>10):
break
print("*只显示最近十条红包豆的变化* \n")
elif (result2["code"]==1 and result2["subcode"]==-1):
print("%s\n"%(result2["msg"]))
else:
print("请求接口失效或参数异常,建议🙏重置参数!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败👀,错误代码如下:\n")
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
#定义查询红包池函数
def queryredpool():
global wm_latitude,wm_longitude,token
# wm_latitude = $wm_latitude
# wm_longitude=$wm_longitude
print("**开始执行查询红包池详情脚本:**\n")
datas = "parActivityId="+parActivityId+"&wm_latitude="+str(wm_latitude)+"&wm_longitude="+str(wm_longitude)+"&token="+str(token)+"&wm_ctype="+wm_ctype
url_myredbeanRecords = baseurl+r"/cfeplay/playcenter/batchgrabred/corepage"
request =urllib.request.Request(url_myredbeanRecords,headers=head,data=datas.encode("utf-8"),method="POST")
try:
        global eight,ten,fifteen,thirty,fifty,eight_left,ten_left,fifteen_left,thirty_left,fifty_left,counttime
response = urllib.request.urlopen(request)
result = response.read().decode("utf-8")
result2 = json.loads(result)
if(result2["code"]==0 and result2["subcode"]==0 and len(result2["data"]["awardInfos"])):
for k in result2["data"]["awardInfos"]:
if"leftStock" not in k:
print("该地区没有红包池,脚本异常退出!")
# if (round(float(k["showPriceNumberYuan"]))==8 and k["leftStock"]==eight_left):
# eight = 0
if (round(float(k["showPriceNumberYuan"]))==10 and k["leftStock"]==ten_left):
ten = 0
if (round(float(k["showPriceNumberYuan"]))==15 and k["leftStock"]==fifteen_left):
fifteen = 0
if (round(float(k["showPriceNumberYuan"]))==30 and k["leftStock"]==thirty_left):
thirty = 0
if (round(float(k["showPriceNumberYuan"]))==50 and k["leftStock"]==fifty_left):
fifty = 0
if counttime<3:
                    print("*红包池中%s元总量:%d张,已被领取:%d张,剩余%d张*\n"%(k["showPriceNumberYuan"],k["totalStock"],k["sendStock"],k["leftStock"]))
counttime =counttime +1
elif (result2["code"]==1 and result2["subcode"]==-1):
print("token失效,导致获取活动信息失败!%s\n"%(result2["msg"]))
else:
print("红包池未开放,等待中!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败👀,错误代码如下:\n")
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
#定义pushPlus的消息推送函数
def pushPlus():
global pushPlusToken
global webhook
# pushPlusToken = $pushPlusToken
if not os.path.exists(str(cwd)+r"/output.txt"):
print("output.txt文件异常,推送退出!🙌")
return -1
file4= open(str(cwd)+r"/output.txt", mode='r',encoding="UTF-8")
message = str(file4.read())
file4.close
pushurl="https://www.pushplus.plus/send"
head_server ={"Host": "www.pushplus.plus","User-Agent":"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Mobile Safari/537.36","content-type":"application/x-www-form-urlencoded"}
print("**开始执行pushPlus推送脚本:**\n")
datas=bytes(urllib.parse.urlencode({"title":"天天神券推送","content":message,"token":pushPlusToken,"template":"markdown","channel":"wechat","webhook":webhook,"callbackUrl":""}),encoding="UTF-8")
request =urllib.request.Request(pushurl,headers=head_server,data=datas,method="POST")
try:
response = urllib.request.urlopen(request,timeout=30)
result = response.read().decode("utf-8")
result2 = json.loads(result)
if(result2["code"]==200) :
print("pushPlus消息推送成功!\n\n")
else:
print("请求接口失效或参数异常,建议重置参数!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败,错误代码如下:\n")
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
#定义server 酱的消息推送函数
def serverjiang():
# serverkey = $serverkey
if not os.path.exists(str(cwd)+r"/output.txt"):
print("output.txt文件异常,推送退出!🙌")
return -1
file4= open(str(cwd)+r"/output.txt", mode='r',encoding="UTF-8")
message = str(file4.read())
file4.close
pushurl="https://sctapi.ftqq.com/"
head_server ={"Host": "sctapi.ftqq.com","User-Agent":"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Mobile Safari/537.36","content-type":"application/x-www-form-urlencoded"}
url_serverkey = pushurl+serverkey+".send"
print("**开始执行server酱推送脚本:**\n")
datas=bytes(urllib.parse.urlencode({"title":"天天神券推送","desp":message,"channel":""}),encoding="UTF-8")
request =urllib.request.Request(url_serverkey,headers=head_server,data=datas,method="POST")
try:
response = urllib.request.urlopen(request,timeout=30)
result = response.read().decode("utf-8")
result2 = json.loads(result)
if(result2["code"]==0) :
pushid = result2["data"]["pushid"]
readkey = result2["data"]["readkey"]
url_checkurl = pushurl+"push?id="+pushid+"&readkey="+readkey
request2 = urllib.request.Request(url_checkurl,headers=head_server,data=datas)
try:
response2 = urllib.request.urlopen(request2,timeout=30)
text=json.loads(response2.read().decode("utf-8"))
if(text["data"]["title"] =="天天神券推送"):
print("server酱推送成功😄!请在移动设备端查看\n")
else:
print("server酱推送失败👀,请检查serverkey是否正确!\n")
except urllib.error.URLError as e2:
if hasattr(e2,"code"):
print("脚本执行失败👀,错误代码如下:\n")
print(e2.code)
if hasattr(e2,"reason"):
print(e2,"reason")
else:
print("请求接口失效或参数异常,建议重置参数!\n")
except urllib.error.URLError as e:
if hasattr(e,"code"):
print("脚本执行失败,错误代码如下:\n")
print(e.code)
if hasattr(e,"reason"):
print(e,"reason")
def main():
global propIdforuse,token
temp = sys.stdout
print("本脚本提供pushPlus、serverkey这两种推送方式,可以二选一或者全选,首次运行脚本请依次选择是否开启对应推送!\n由于server酱每日免费限额5条,若需开启推送,请首选pushPlus!\n")
# getpushPlusToken()
# getserverkey()
# token = gettoken()
# getlatlongitude()
# getpropId_Coinnumber(token)
sys.stdout = Logger(str(cwd)+r'/output.txt')
batchId = getbatchId()
    ## First do the daily check-in so as to earn guaranteed-win charms or red-packet beans
doAction()
myRedBeanRecords()
if leftdou >=setexchangedou:
exchange()
else:
print("您当前红包豆为%d未满预设的%d数量,不会执行红包豆兑换必中符脚本,多攒几天豆子再来吧!\n"%(leftdou,setexchangedou))
querymyProps()
    # Boolean flags: is the current time inside one of the user-defined windows for grabbing large red packets?
istimeforbig1= (n_time <=d_time4) and(n_time>=d_time3)
istimeforbig2= (n_time <=d_time6) and(n_time>=d_time5)
if n_time > d_time7:
if istimeforbig1:
if propIdforuse ==5:
print("**当前符合抢30元以上大额红包的条件**\n")
print("**正使用15元必中符为您尝试抢30元以上的红包**\n")
                ## Holding a guaranteed-win charm worth 15 yuan or more: keep waiting and try for a 30-yuan packet first; once the 15s are gone, go straight for the 30s and maybe even land a 50
while fifteen ==1 :
if not istimeforbig1:
print("*👴尽力了,等到红包池要关闭了都未等到15元以上大额红包被抢完,开始保底15元,注意查收!*\n")
break
if(thirty ==1 and fifty ==1):
print("*15有剩余,30元已被抢完,50元已被抢完,跳出监测,正在为您抢保底15元红包!*\n")
break
queryredpool()
if istimeforbig2 :
if propIdforuse ==5:
print("**当前符合抢30元以上大额红包的条件**\n")
print("**正使用15元必中符为您尝试抢30元以上的红包**\n")
                ## Holding a guaranteed-win charm worth 15 yuan or more: keep waiting and try for a 30-yuan packet first; once the 15s are gone, go straight for the 30s and maybe even land a 50
while fifteen ==1 :
if not istimeforbig2 :
print("*👴尽力了,等到红包池要关闭了都未等到15元以上大额红包被抢完,开始保底15元,注意查收!*\n")
break
if(thirty ==1 and fifty ==1):
print("*15有剩余,30元已被抢完,50元已被抢完,跳出监测,正在为您抢保底15元红包!*\n")
break
queryredpool()
if istimeforbig1:
if propIdforuse ==3:
print("**当前符合抢30元以上大额红包的条件**\n")
print("**正使用10元必中符为您尝试抢30元以上的红包**\n")
                ## Holding a guaranteed-win charm worth 10 yuan or more: keep waiting and try for a 30-yuan packet first; once the 10s and 15s are both gone, go straight for the 30s and maybe even land a 50
while fifteen ==1 :
if(thirty ==1 and fifty ==1 ):
print("&15有剩余,30元已被抢完,50元已被抢完,跳出监测,正在为您抢保底15元红包!*\n")
break
if(br ==1):
break
if not istimeforbig1:
print("*👴尽力了,等到红包池要关闭了都未等到15元以上大额红包被抢完,开始保底15元,注意查收!*\n")
break
if ten ==0 :
queryredpool()
while ten ==1:
if not istimeforbig1:
br = 1
print("*👴尽力了,等到红包池要关闭了都未等到任意大额红包被抢完,开始保底10元,注意查收!*\n")
queryredpool()
if istimeforbig2:
if propIdforuse ==3:
print("**当前符合抢30元以上大额红包的条件**\n")
print("**正使用10元必中符为您尝试抢30元以上的红包**\n")
                ## Holding a guaranteed-win charm worth 10 yuan or more: keep waiting and try for a 30-yuan packet first; once the 10s and 15s are both gone, go straight for the 30s and maybe even land a 50
while fifteen ==1 :
if(thirty ==1 and fifty ==1 ):
print("&15有剩余,30元已被抢完,50元已被抢完,跳出监测,正在为您抢保底15元红包!*\n")
break
if(br ==1):
break
if not istimeforbig2:
print("*👴尽力了,等到红包池要关闭了都未等到15元以上大额红包被抢完,开始保底15元,注意查收!*\n")
break
if ten ==0 :
queryredpool()
while ten ==1:
if not istimeforbig2:
br = 1
print("*👴尽力了,等到红包池要关闭了都未等到任意大额红包被抢完,开始保底10元,注意查收!*\n")
queryredpool()
if n_time < d_time7 :
propIdforuse =1
drawlottery(batchId)
if(int(showPriceNumber)<500):
redtobean(batchId)
else:
acceptRed(batchId)
querymyreward()
sendTaskRedBean()
querymyProps()
myRedBeanRecords()
sys.stdout = temp
if(yesornot2 == "y"):
pushPlus()
else:
print("您已默认关闭pushPlus推送!若需开启,请将pushPlusToken 填入本脚本目录下的pushPlusToken.txt文本中!\n")
if(yesornot == "y"):
if n_time>d_time0:
serverjiang()
else:
print("当前时间段非抢红包时间,默认关闭server酱推送以节约server酱每日5条推送的限额!")
else:
print("您已默认关闭server酱推送!若需开启,请将serverkey 填入本脚本目录下的serverkey.txt文本中!\n")
if __name__ == "__main__":
main() |
the-stack_0_8713 | import twitter
from searchtweets import ResultStream, gen_rule_payload, load_credentials, collect_results
import json
import os.path
user_list = []
followers_list = []
# api = twitter.Api(consumer_key='C0Q2slgd38EQUV82soOig68Uo',
# consumer_secret='JKJ0tVC8vnlDmVbvPT4BF67nx7r5VqnJTSPHMiGqJLo43bba3m',
# access_token_key='479643521-Q7wHBkOomWOSa7j2jqiKrh5i8VSCbnZewOy0lUJv',
# access_token_secret='HSdLbWQiLXtwpZKKI3W2iW98oDk3QJbrGBEGYmAHhlwU4')
# api = twitter.Api(consumer_key='Wa5xi8yfBZ4LihhpZp2KqzlOq',
# consumer_secret='JHZn4GSi08P6e2S71eRAOT2cDWBk0VrYbMwOg0XhzssOALbsDE',
# access_token_key='86863810-NA4wtMzKrQ62EMIvFUyIaTlXuIWGjd5QwlZkJBL4P',
# access_token_secret='DuhCa5Kg3zjHJykC3W30wPxteEwz5QGEQZvoDAqiVwM5o')
premium_search_args = load_credentials(filename="./twitter_keys.yaml",
yaml_key="search_tweets_30_day_dev",
env_overwrite=False)
rule = gen_rule_payload("bcp point_radius:[-77.0304221 -12.1217806 20mi]", results_per_call=100)
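# The rule above searches for "bcp" inside a 20-mile point_radius (longitude, latitude, radius)
# centred roughly on Lima, Peru, with at most 100 results per API call.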
bcp = collect_results(rule,
max_results=100,
result_stream_args=premium_search_args)
[print(tweet.all_text) for tweet in bcp[0:10]]
# %%
# Load File
if os.path.isfile('Miner/bcp.json'):
with open('Miner/bcp.json') as json_file:
past_res_json = json.load(json_file)
past_res = [twitter.Status.NewFromJsonDict(x) for x in past_res_json.get('statuses', '')]
else:
past_res = None
# results = api.GetSearch(raw_query="q=banco bcp", geocode='-12.04902,-77.03360,10km', return_json=True)
# with open('bcp.json', 'w') as f:
# json.dump(results, f)
# %%
# Get credentials and search
rawurl = 'https://api.twitter.com/1.1/search/tweets.json?q=from%3Atwitterdev&result_type=mixed&count=2'
results_json = api.GetSearch(term='bcp')
results = [twitter.Status.NewFromJsonDict(x) for x in results_json.get('statuses', '')]
# %%
if past_res:
total_result = past_res+results
# %%
with open('bcp.json', 'w') as f:
json.dump(total_result, f)
# results = [twitter.Status.NewFromJsonDict(x) for x in results.get('statuses', '')]  # redundant: results is already a list of Status objects built above
# %%
for tweet in results:
tmptweet = tweet.AsDict()
# print(tmptweet)
user_list.append(tmptweet['user']['id'])
print(tmptweet['user']['screen_name'])
# %%
for tweet in results:
tmptweet = tweet.AsDict()
|
the-stack_0_8714 | #%%
import numpy as np
import numpy.linalg as lin
import scipy.stats as sts
import scipy.integrate as intgr
import scipy.optimize as opt
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
#%%
incomes = np.array([[100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700 , 1800, 1900, 2000]])
incomes = incomes * 10000
# URL: https://www.mf-realty.jp/tebiki/taxbusiness/capter06_05.html
effective_tax = np.array([0.156, 0.164, 0.172, 0.21, 0.238, 0.258, 0.272, 0.286, 0.297, 0.316, 0.331, 0.344, 0.355, 0.364, 0.373, 0.38, 0.386, 0.392, 0.40, 0.48])
#%%
### GS Tax Function
# URL: https://www.jstor.org/stable/pdf/41789070.pdf
def tax_func(I, phi0, phi1, phi2):
#txrates = ((phi0 * (I - ((I ** -phi1) + phi2) ** (-1 / phi1))) / I)
txrates = phi0 - phi0 * (phi1 * I ** phi2 + 1)**(-1 / phi2)
return txrates
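# The form above is the Gouveia-Strauss (GS) effective tax rate referenced in the link above:
#   ETR(I) = phi0 * (1 - (phi1 * I**phi2 + 1)**(-1/phi2))
# so total tax is T(I) = I * ETR(I) and phi0 is the limiting rate as income grows.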
def tax_func2(I, phi0, phi1, phi2):
tau = (phi0 * (I - ((I ** -phi1) + phi2) ** (-1 / phi1)))
return tau
#%%
def model_moments(I_array, phi0, phi1, phi2):
return tax_func(I_array, phi0, phi1, phi2)
def data_moments():
effective_tax = np.array([0.156, 0.164, 0.172, 0.21, 0.238, 0.258, 0.272, 0.286, 0.297, 0.316, 0.331, 0.344, 0.355, 0.364, 0.373, 0.38, 0.386, 0.392, 0.40, 0.48])
return effective_tax
def err_vec(income, phi0, phi1, phi2, simple):
data_mms = data_moments()
model_mms = model_moments(income, phi0, phi1, phi2)
if simple:
err_vec = model_mms - data_mms
else:
err_vec = (model_mms - data_mms) / data_mms
return err_vec
def criterion(params, *args):
phi0, phi1, phi2 = params
income, W = args
err = err_vec(income, phi0, phi1, phi2, simple = False).squeeze()
crit_val = err.T @ W @ err
return crit_val
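# GMM criterion: with percentage moment errors e(theta) = (model - data) / data and weighting
# matrix W (the identity below), the estimator minimizes the quadratic form e(theta)' W e(theta).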
#%%
### Optimization Problem:
# Initial guess of parameters
phi0 = 0.479
phi1 = 0.022
phi2 = 0.817
params_init = np.array([phi0, phi1, phi2])
# Weighting matrix
W_hat = np.eye(20)
incomes = np.array([[100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700 , 1800, 1900, 2000]])
incomes = incomes * 10000
incomes = incomes * 10 ** (-6)
#gmm_args = (I_array, I_array_2, W_hat)
gmm_args = (incomes, W_hat)
# Optimization
results_GMM = opt.minimize(criterion, params_init, args = (gmm_args), method = 'L-BFGS-B')
print(results_GMM)
phi0_GMM, phi1_GMM, phi2_GMM = results_GMM.x
#%%
### Plots
I = np.linspace(1,20,20)
tax_rate = tax_func(I, phi0_GMM, phi1_GMM, phi2_GMM)
plt.xlabel('Income (Millions of Yen)')
plt.ylim(0, 0.5)
plt.ylabel(r'Effective Tax Rate $\tau_{s,t}^{ETR}$')
plt.plot(I, tax_rate, color = 'r', label = r'Estimated Tax Rates')
plt.legend(loc='upper left')
tax_rate_data = np.array(effective_tax)
plt.scatter(I, tax_rate_data, label = r'Calculated Tax Rates')
plt.legend(loc='upper left')
plt.grid(b=True, which='major', color='0.65', linestyle='-')
plt.tight_layout(rect=(0, 0.03, 1, 1))
plt.savefig("effective_tax_gs.png")
plt.close()
#%%
def marg_tax(I, phi0, phi1, phi2):
margrates = phi0 * phi1 * I ** (phi2 - 1) * (phi1 * I ** phi2 + 1) ** ( (- 1 - phi2) / phi2)
return margrates
marg = marg_tax(I, phi0_GMM, phi1_GMM, phi2_GMM)
I = np.linspace(1,20,20)
plt.xlabel('Income (Millions of Yen)')
plt.ylabel(r'Marginal Tax Rate $\tau_{s,t}^{MTR}$')
plt.plot(I, marg, color = 'r', label = r'Estimated Tax Rates')
plt.legend(loc='upper right')
plt.grid(b=True, which='major', color='0.65', linestyle='-')
plt.tight_layout(rect=(0, 0.03, 1, 1))
plt.savefig("marginal_tax_gs.png")
plt.close()
|
the-stack_0_8715 | import json
import shutil
import logging
from flask import Blueprint, request
from tempfile import mkdtemp
from werkzeug.exceptions import BadRequest
from normality import safe_filename, stringify
from servicelayer.archive.util import ensure_path
from aleph.core import db, archive
from aleph.model import Document, Entity, Events
from aleph.queues import ingest_entity
from aleph.index.entities import index_proxy
from aleph.logic.notifications import publish, channel_tag
from aleph.views.util import get_db_collection, get_flag
from aleph.views.util import jsonify, validate_data, get_session_id
from aleph.views.forms import DocumentCreateSchema
log = logging.getLogger(__name__)
blueprint = Blueprint('ingest_api', __name__)
def _load_parent(collection, meta):
"""Determine the parent document for the document that is to be
ingested."""
parent_id = meta.get('parent_id')
if parent_id is None:
return
parent = Document.by_id(parent_id, collection_id=collection.id)
if parent is None:
raise BadRequest(response=jsonify({
'status': 'error',
'message': 'Cannot load parent document'
}, status=400))
return parent
def _load_metadata():
"""Unpack the common, pre-defined metadata for all the uploaded files."""
try:
meta = json.loads(request.form.get('meta', '{}'))
except Exception as ex:
raise BadRequest(str(ex))
validate_data(meta, DocumentCreateSchema)
foreign_id = stringify(meta.get('foreign_id'))
if not len(request.files) and foreign_id is None:
raise BadRequest(response=jsonify({
'status': 'error',
'message': 'Directories need to have a foreign_id'
}, status=400))
return meta, foreign_id
def _notify(collection, document_id):
if not collection.casefile:
return
channels = [
channel_tag(document_id, Entity),
channel_tag(collection),
]
params = {
'collection': collection,
'document': document_id
}
publish(Events.INGEST_DOCUMENT,
params=params,
channels=channels,
actor_id=request.authz.id)
db.session.commit()
@blueprint.route('/api/2/collections/<int:collection_id>/ingest',
methods=['POST', 'PUT'])
def ingest_upload(collection_id):
collection = get_db_collection(collection_id, request.authz.WRITE)
job_id = get_session_id()
sync = get_flag('sync', default=False)
meta, foreign_id = _load_metadata()
parent = _load_parent(collection, meta)
upload_dir = ensure_path(mkdtemp(prefix='aleph.upload.'))
try:
content_hash = None
for storage in request.files.values():
path = safe_filename(storage.filename, default='upload')
path = upload_dir.joinpath(path)
storage.save(str(path))
content_hash = archive.archive_file(path)
document = Document.save(collection=collection,
parent=parent,
foreign_id=foreign_id,
content_hash=content_hash,
meta=meta,
uploader_id=request.authz.id)
collection.touch()
db.session.commit()
proxy = document.to_proxy()
if proxy.schema.is_a(Document.SCHEMA_FOLDER) and sync:
index_proxy(collection, proxy, sync=sync)
ingest_entity(collection, proxy, job_id=job_id, sync=sync)
document_id = collection.ns.sign(document.id)
_notify(collection, document_id)
finally:
shutil.rmtree(upload_dir)
return jsonify({
'status': 'ok',
'id': document_id
}, status=201)
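# Hypothetical client call (collection id, token and file name are placeholders), assuming the
# usual multipart contract implied by _load_metadata() and request.files above:
#
#   curl -X POST "https://aleph.example.org/api/2/collections/7/ingest" \
#        -H "Authorization: Token <api-key>" \
#        -F 'meta={"file_name": "report.pdf"}' \
#        -F "file=@report.pdf"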
|
the-stack_0_8717 | from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import *
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(831, 682)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("logo.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setAutoFillBackground(False)
self.default_palette = QApplication.palette()
self.default_style = QApplication.style()
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_4 = QGridLayout(self.centralwidget)
self.gridLayout_4.setObjectName("gridLayout_4")
self.mainlayout = QGridLayout()
self.mainlayout.setObjectName("mainlayout")
spacerItem = QSpacerItem(0, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.mainlayout.addItem(spacerItem, 7, 0, 1, 1)
self.horizontalLayout_2 = QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout = QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.title_welcome = QLabel(self.centralwidget)
self.title_welcome.setBaseSize(QtCore.QSize(800, 25))
self.title_welcome.setFont(font)
self.title_welcome.setAlignment(QtCore.Qt.AlignCenter)
self.title_welcome.setObjectName("title_welcome")
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.titlename = QLabel(self.centralwidget)
self.titlename.setBaseSize(QtCore.QSize(800, 30))
self.titlename.setFont(font)
self.titlename.setAlignment(QtCore.Qt.AlignCenter)
self.titlename.setObjectName("titlename")
self.software_info1 = QLabel(self.centralwidget)
self.software_info1.setBaseSize(QtCore.QSize(800, 15))
self.software_info1.setObjectName("software_info1")
self.software_info2 = QLabel(self.centralwidget)
self.software_info2.setBaseSize(QtCore.QSize(800, 15))
self.software_info2.setObjectName("software_info2")
self.software_info3 = QLabel(self.centralwidget)
self.software_info3.setBaseSize(QtCore.QSize(800, 17))
self.software_info3.setObjectName("software_info3")
self.mainlayout.addWidget(self.title_welcome, 0, 1, 1, 1)
self.mainlayout.addWidget(self.titlename, 1, 1, 1, 1)
self.mainlayout.addWidget(self.software_info1, 2, 1, 1, 1)
self.mainlayout.addWidget(self.software_info2, 3, 1, 1, 1)
self.mainlayout.addWidget(self.software_info3, 4, 1, 1, 1)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(8)
self.gratinglayout = QGridLayout()
self.gratinglayout.setObjectName("gratinglayout")
self.groove_label = QLabel(self.centralwidget)
self.groove_label.setFont(font)
self.groove_label.setObjectName("groove_label")
self.groove_form = QComboBox(self.centralwidget)
self.groove_form.setObjectName("groove_form")
self.groove_form.addItem("")
self.groove_form.addItem("")
self.groove_form.addItem("")
self.micro_setting_label = QLabel(self.centralwidget)
self.micro_setting_label.setFont(font)
self.micro_setting_label.setObjectName("micro_setting_label")
self.micro_setting_form = QDoubleSpinBox(self.centralwidget, value=9.00, maximum=22.00, minimum=0.00, singleStep=0.01)
self.micro_setting_form.setObjectName("micro_setting_form")
self.feature_label = QLabel(self.centralwidget)
self.feature_label.setFont(font)
self.feature_label.setObjectName("feature_label")
self.feature_form = QLineEdit(self.centralwidget)
self.feature_form.setObjectName("feature_form")
self.gratinglayout.addWidget(self.groove_label, 0, 0, 1, 1)
self.gratinglayout.addWidget(self.groove_form, 0, 1, 1, 1)
self.gratinglayout.addWidget(self.micro_setting_label, 0, 2, 1, 1)
self.gratinglayout.addWidget(self.micro_setting_form, 0, 3, 1, 1)
self.gratinglayout.addWidget(self.feature_label, 0, 4, 1, 1)
self.gratinglayout.addWidget(self.feature_form, 0, 5, 1, 1)
self.verticalLayout.addLayout(self.gratinglayout)
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.observer_label = QLabel(self.centralwidget)
self.observer_label.setFont(font)
self.observer_label.setObjectName("observer_label")
self.observer_form = QTextEdit(self.centralwidget)
self.observer_form.setObjectName("observer_form")
self.observer_form.lineWrapMode()
self.lamplayout = QGridLayout()
self.lamplayout.setObjectName("lamplayout")
self.lamp_label = QLabel(self.centralwidget)
self.lamp_label.setFont(font)
self.lamp_label.setObjectName("lamp_label")
self.lamp_form = QComboBox(self.centralwidget)
self.lamp_form.setObjectName("lamp_form")
self.lamp_form.addItem("")
self.lamp_form.addItem("")
spacerItem1 = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
spacerItem2 = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.horizontalLayout.addWidget(self.observer_label)
self.horizontalLayout.addWidget(self.observer_form)
self.lamplayout.addWidget(self.lamp_label, 2, 1, 1, 1)
self.lamplayout.addWidget(self.lamp_form, 3, 1, 1, 1)
self.lamplayout.addItem(spacerItem1, 0, 1, 1, 1)
self.lamplayout.addItem(spacerItem2, 4, 1, 1, 1)
self.horizontalLayout.addLayout(self.lamplayout)
self.verticalLayout.addLayout(self.horizontalLayout)
self.gridLayout_9 = QGridLayout()
self.gridLayout_9.setObjectName("gridLayout_9")
self.datatype_label = QLabel(self.centralwidget)
self.datatype_label.setAlignment(QtCore.Qt.AlignCenter)
self.datatype_label.setObjectName("datatype_label")
self.datatype_form = QComboBox(self.centralwidget)
self.datatype_form.setObjectName("datatype_form")
self.datatype_form.addItem("")
self.datatype_form.addItem("")
self.datatype_form.addItem("")
self.datatype_form.addItem("")
self.datatype_form.addItem("")
self.datatype_form.setEnabled(False)
self.objname_label = QLabel(self.centralwidget)
self.objname_label.setAlignment(QtCore.Qt.AlignCenter)
self.objname_label.setObjectName("objname_label")
self.objname_form = QLineEdit(self.centralwidget)
self.objname_form.setText("")
self.objname_form.setObjectName("objname_form")
self.objname_form.setEnabled(False)
self.filename_label = QLabel(self.centralwidget)
self.filename_label.setAlignment(QtCore.Qt.AlignCenter)
self.filename_label.setObjectName("filename_label")
self.filename_form = QLineEdit(self.centralwidget)
self.filename_form.setObjectName("filename_form")
self.filename_form.setEnabled(False)
self.bin_label = QLabel(self.centralwidget)
self.bin_label.setAlignment(QtCore.Qt.AlignCenter)
self.bin_label.setObjectName("bin_label")
self.bin_form = QComboBox(self.centralwidget)
self.bin_form.setObjectName("bin_form")
self.bin_form.addItem("")
self.bin_form.addItem("")
self.bin_form.addItem("")
self.bin_form.addItem("")
self.bin_form.setEnabled(False)
self.exptime_label = QLabel(self.centralwidget)
self.exptime_label.setAlignment(QtCore.Qt.AlignCenter)
self.exptime_label.setObjectName("exptime_label")
self.exptime_form = QDoubleSpinBox(self.centralwidget, value=0., maximum=10000.00, minimum=0.00, singleStep=0.01)
self.exptime_form.setObjectName("exptime_form")
self.exptime_form.setEnabled(False)
self.nexp_label = QLabel(self.centralwidget)
self.nexp_label.setAlignment(QtCore.Qt.AlignCenter)
self.nexp_label.setObjectName("nexp_label")
self.nexp_form = QSpinBox(self.centralwidget, value=1, maximum=50.00, minimum=1, singleStep=1)
self.nexp_form.setObjectName("nexp_form")
self.nexp_form.setEnabled(False)
self.gridLayout_9.addWidget(self.datatype_label, 0, 0, 1, 1)
self.gridLayout_9.addWidget(self.datatype_form, 1, 0, 1, 1)
self.gridLayout_9.addWidget(self.objname_label, 0, 1, 1, 1)
self.gridLayout_9.addWidget(self.objname_form, 1, 1, 1, 1)
self.gridLayout_9.addWidget(self.filename_label, 0, 2, 1, 1)
self.gridLayout_9.addWidget(self.filename_form, 1, 2, 1, 1)
self.gridLayout_9.addWidget(self.bin_label, 0, 3, 1, 1)
self.gridLayout_9.addWidget(self.bin_form, 1, 3, 1, 1)
self.gridLayout_9.addWidget(self.exptime_label, 0, 4, 1, 1)
self.gridLayout_9.addWidget(self.exptime_form, 1, 4, 1, 1)
self.gridLayout_9.addWidget(self.nexp_label, 0, 5, 1, 1)
self.gridLayout_9.addWidget(self.nexp_form, 1, 5, 1, 1)
self.gridLayout_9.setColumnMinimumWidth(1, 1)
self.gridLayout_9.setColumnMinimumWidth(2, 2)
self.gridLayout_9.setColumnMinimumWidth(4, 1)
self.gridLayout_9.setColumnStretch(1, 1)
self.gridLayout_9.setColumnStretch(2, 2)
self.gridLayout_9.setColumnStretch(4, 1)
self.verticalLayout.addLayout(self.gridLayout_9)
self.verticalLayout.setStretch(0, 1)
self.verticalLayout.setStretch(1, 2)
self.verticalLayout.setStretch(2, 1)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.line_2 = QFrame(self.centralwidget)
self.line_2.setFrameShape(QFrame.VLine)
self.line_2.setObjectName("line_2")
self.horizontalLayout_2.addWidget(self.line_2)
self.gridLayout = QGridLayout()
self.gridLayout.setSizeConstraint(QLayout.SetMinimumSize)
self.gridLayout.setObjectName("gridLayout")
self.ccdtemp_label = QLabel(self.centralwidget)
self.ccdtemp_label.setFont(font)
self.ccdtemp_label.setObjectName("ccdtemp_label")
self.ccdtemp_form = QDoubleSpinBox(self.centralwidget, maximum=30.00, minimum=-20.00, singleStep=0.01)
self.ccdtemp_form.setObjectName("ccdtemp_form")
self.ccdtemp_form.setValue(-10.)
self.ambtemp_label = QLabel(self.centralwidget)
self.ambtemp_label.setFont(font)
self.ambtemp_label.setObjectName("ambtemp_label")
self.ambtemp_form = QDoubleSpinBox(self.centralwidget, value=20.00, maximum=50.00, minimum=0.00, singleStep=0.01)
self.ambtemp_form.setObjectName("ambtemp_form")
self.ambhum_label = QLabel(self.centralwidget)
self.ambhum_label.setFont(font)
self.ambhum_label.setObjectName("ambhum_label")
self.ambhum_form = QDoubleSpinBox(self.centralwidget, value=50.00, maximum=100.00, minimum=0.00, singleStep=0.01)
self.ambhum_form.setObjectName("ambhum_form")
self.skycond_label = QLabel(self.centralwidget)
self.skycond_label.setFont(font)
self.skycond_label.setObjectName("skycond_label")
self.skycond_form = QComboBox(self.centralwidget)
self.skycond_form.setObjectName("skycond_form")
self.skycond_form.addItem("")
self.skycond_form.addItem("")
self.skycond_form.addItem("")
self.skycond_form.addItem("")
self.skycond_form.addItem("")
self.comment_label = QLabel(self.centralwidget)
self.comment_label.setFont(font)
self.comment_label.setObjectName("comment_label")
self.comment_form = QLineEdit(self.centralwidget)
self.comment_form.setObjectName("comment_form")
self.gridLayout.addWidget(self.ccdtemp_label, 0, 0, 1, 2)
self.gridLayout.addWidget(self.ccdtemp_form, 0, 2, 1, 1)
self.gridLayout.addWidget(self.ambtemp_label, 1, 0, 1, 2)
self.gridLayout.addWidget(self.ambtemp_form, 1, 2, 1, 1)
self.gridLayout.addWidget(self.ambhum_label, 2, 0, 1, 2)
self.gridLayout.addWidget(self.ambhum_form, 2, 2, 1, 1)
self.gridLayout.addWidget(self.skycond_label, 3, 0, 1, 1)
self.gridLayout.addWidget(self.skycond_form, 3, 1, 1, 2)
self.gridLayout.addWidget(self.comment_label, 4, 0, 1, 1)
self.gridLayout.addWidget(self.comment_form, 4, 1, 1, 2)
self.gridLayout.setRowMinimumHeight(0, 1)
self.gridLayout.setRowMinimumHeight(1, 1)
self.gridLayout.setRowMinimumHeight(2, 1)
self.gridLayout.setRowMinimumHeight(3, 1)
self.gridLayout.setRowMinimumHeight(4, 1)
self.horizontalLayout_2.addLayout(self.gridLayout)
self.horizontalLayout_2.setStretch(0, 5)
self.horizontalLayout_2.setStretch(2, 2)
self.mainlayout.addLayout(self.horizontalLayout_2, 6, 1, 1, 1)
self.tableview = QTableView(self.centralwidget)
self.tableview.setEnabled(True)
self.tableview.setMinimumSize(QtCore.QSize(800, 280))
self.tableview.setBaseSize(QtCore.QSize(800, 280))
self.tableview.setObjectName("tableview")
self.tableview.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.mainlayout.addWidget(self.tableview, 7, 1, 1, 1)
self.confirmlayout = QGridLayout()
self.confirmlayout.setObjectName("confirmlayout")
self.nightmode_check = QCheckBox(self.centralwidget)
self.nightmode_check.setObjectName("nightmode_check")
self.startlog_button = QPushButton(self.centralwidget)
self.startlog_button.setObjectName("startlog_button")
self.updatelog_button = QPushButton(self.centralwidget)
self.updatelog_button.setEnabled(False)
self.updatelog_button.setObjectName("updatelog_button")
self.savelog_button = QPushButton(self.centralwidget)
self.savelog_button.setEnabled(False)
self.savelog_button.setObjectName("savelog_button")
self.resetlog_button = QPushButton(self.centralwidget)
self.resetlog_button.setEnabled(False)
self.resetlog_button.setObjectName("resetlog_button")
self.closewindow_button = QPushButton(self.centralwidget)
self.closewindow_button.setObjectName("closewindow_button")
spacerItem3 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.confirmlayout.addWidget(self.nightmode_check, 0, 0, 1, 1)
self.confirmlayout.addItem(spacerItem3, 0, 1, 1, 1)
self.confirmlayout.addWidget(self.startlog_button, 0, 2, 1, 1)
self.confirmlayout.addWidget(self.updatelog_button, 0, 3, 1, 1)
self.confirmlayout.addWidget(self.savelog_button, 0, 4, 1, 1)
self.confirmlayout.addWidget(self.resetlog_button, 0, 5, 1, 1)
self.confirmlayout.addWidget(self.closewindow_button, 0, 6, 1, 1)
self.mainlayout.addLayout(self.confirmlayout, 8, 1, 1, 1)
self.line = QFrame(self.centralwidget)
self.line.setFrameShape(QFrame.HLine)
self.line.setObjectName("line")
self.mainlayout.addWidget(self.line, 5, 1, 1, 1)
self.mainlayout.setRowStretch(6, 1)
self.mainlayout.setRowStretch(7, 3)
self.gridLayout_4.addLayout(self.mainlayout, 0, 0, 1, 1)
spacerItem4 = QSpacerItem(40, 0, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem4, 1, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.groove_label.setBuddy(self.groove_form)
self.micro_setting_label.setBuddy(self.micro_setting_form)
self.feature_label.setBuddy(self.feature_form)
self.observer_label.setBuddy(self.observer_form)
self.lamp_label.setBuddy(self.lamp_form)
self.ccdtemp_label.setBuddy(self.ccdtemp_form)
self.ambtemp_label.setBuddy(self.ambtemp_form)
self.skycond_label.setBuddy(self.skycond_form)
self.ambhum_label.setBuddy(self.ambhum_form)
self.comment_label.setBuddy(self.comment_form)
self.datatype_label.setBuddy(self.datatype_form)
self.objname_label.setBuddy(self.objname_form)
self.filename_label.setBuddy(self.filename_form)
self.bin_label.setBuddy(self.bin_form)
self.exptime_label.setBuddy(self.exptime_form)
self.nexp_label.setBuddy(self.nexp_form)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.nightmode_check, self.startlog_button)
MainWindow.setTabOrder(self.startlog_button, self.updatelog_button)
MainWindow.setTabOrder(self.updatelog_button, self.savelog_button)
MainWindow.setTabOrder(self.savelog_button, self.resetlog_button)
MainWindow.setTabOrder(self.resetlog_button, self.closewindow_button)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "LHIRES Telescope Data Logging Software"))
self.title_welcome.setText(_translate("MainWindow", "Welcome to"))
self.titlename.setText(_translate("MainWindow", "LHIRES Telescope Data Logging Software"))
self.software_info2.setText(_translate("MainWindow", "version: 2.0"))
self.software_info1.setText(_translate("MainWindow", "by: Irfan Imaduddin"))
self.software_info3.setText(_translate("MainWindow", "contact: irfanimaduddin[at]gmail[dot]com"))
self.groove_label.setText(_translate("MainWindow", "Grating"))
self.groove_form.setItemText(0, _translate("MainWindow", "600 g/mm"))
self.groove_form.setItemText(1, _translate("MainWindow", "1200 g/mm"))
self.groove_form.setItemText(2, _translate("MainWindow", "2400 g/mm"))
self.micro_setting_label.setText(_translate("MainWindow", "Micrometer Setting"))
self.feature_label.setText(_translate("MainWindow", "Spectral Feature"))
self.observer_label.setText(_translate("MainWindow", "Observer(s)"))
self.lamp_label.setText(_translate("MainWindow", "Comparison Lamp"))
self.lamp_form.setItemText(0, _translate("MainWindow", "NeAr"))
self.lamp_form.setItemText(1, _translate("MainWindow", "FeNeAr"))
self.datatype_label.setText(_translate("MainWindow", "Datatype"))
self.datatype_form.setItemText(0, _translate("MainWindow", "Object"))
self.datatype_form.setItemText(1, _translate("MainWindow", "Comparison"))
self.datatype_form.setItemText(2, _translate("MainWindow", "Bias"))
self.datatype_form.setItemText(3, _translate("MainWindow", "Dark"))
self.datatype_form.setItemText(4, _translate("MainWindow", "Flat"))
self.objname_label.setText(_translate("MainWindow", "Object Name"))
self.filename_label.setText(_translate("MainWindow", "File Name"))
self.bin_label.setText(_translate("MainWindow", "Bin"))
self.bin_form.setItemText(0, _translate("MainWindow", "1"))
self.bin_form.setItemText(1, _translate("MainWindow", "2"))
self.bin_form.setItemText(2, _translate("MainWindow", "3"))
self.bin_form.setItemText(3, _translate("MainWindow", "4"))
self.exptime_label.setText(_translate("MainWindow", "Exptime"))
self.nexp_label.setText(_translate("MainWindow", "N"))
self.ccdtemp_label.setText(_translate("MainWindow", "CCD Temperature (C)"))
self.ambtemp_label.setText(_translate("MainWindow", "Ambient Temperature (C)"))
self.ambhum_label.setText(_translate("MainWindow", "Ambient Humidity (%)"))
self.skycond_label.setText(_translate("MainWindow", "Sky Condition"))
self.skycond_form.setItemText(0, _translate("MainWindow", "Good"))
self.skycond_form.setItemText(1, _translate("MainWindow", "Variable"))
self.skycond_form.setItemText(2, _translate("MainWindow", "Hazy"))
self.skycond_form.setItemText(3, _translate("MainWindow", "Cloudy"))
self.skycond_form.setItemText(4, _translate("MainWindow", "Rain"))
self.comment_label.setText(_translate("MainWindow", "Comment(s)"))
self.nightmode_check.setText(_translate("MainWindow", "Night Mode"))
self.startlog_button.setText(_translate("MainWindow", "Start"))
self.updatelog_button.setText(_translate("MainWindow", "Update"))
self.savelog_button.setText(_translate("MainWindow", "End && Save"))
self.resetlog_button.setText(_translate("MainWindow", "Reset"))
self.closewindow_button.setText(_translate("MainWindow", "Exit")) |
the-stack_0_8720 | r"""
Elements of bounded height in number fields
Sage functions to list all elements of a given number field with height less
than a specified bound.
AUTHORS:
- John Doyle (2013): initial version
- David Krumm (2013): initial version
- TJ Combs (2018): added Doyle-Krumm algorithm - 4
- Raghukul Raman (2018): added Doyle-Krumm algorithm - 4
REFERENCES:
- [DK2013]
"""
# ****************************************************************************
# Copyright (C) 2013 John Doyle and David Krumm
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function, division
from six.moves import range
from copy import copy
from itertools import product
from sage.rings.real_mpfr import RealField
from sage.rings.number_field.unit_group import UnitGroup
from sage.modules.free_module_element import vector
from sage.matrix.constructor import column_matrix
from sage.rings.rational_field import QQ
from sage.rings.all import RR, Infinity
from sage.geometry.polyhedron.constructor import Polyhedron
def bdd_norm_pr_gens_iq(K, norm_list):
r"""
Compute generators for all principal ideals in an imaginary quadratic field
`K` whose norms are in ``norm_list``.
The only keys for the output dictionary are integers n appearing in
``norm_list``.
The function will only be called with `K` an imaginary quadratic field.
The function will return a dictionary for other number fields, but it may be
incorrect.
INPUT:
- `K` -- an imaginary quadratic number field
- ``norm_list`` -- a list of positive integers
OUTPUT:
- a dictionary of number field elements, keyed by norm
EXAMPLES:
In `QQ(i)`, there is one principal ideal of norm 4, two principal ideals of
norm 5, but no principal ideals of norm 7::
sage: from sage.rings.number_field.bdd_height import bdd_norm_pr_gens_iq
sage: K.<g> = NumberField(x^2 + 1)
sage: L = range(10)
sage: bdd_pr_ideals = bdd_norm_pr_gens_iq(K, L)
sage: bdd_pr_ideals[4]
[2]
sage: bdd_pr_ideals[5]
[-g - 2, -g + 2]
sage: bdd_pr_ideals[7]
[]
There are no ideals in the ring of integers with negative norm::
sage: from sage.rings.number_field.bdd_height import bdd_norm_pr_gens_iq
sage: K.<g> = NumberField(x^2 + 10)
sage: L = range(-5,-1)
sage: bdd_pr_ideals = bdd_norm_pr_gens_iq(K,L)
sage: bdd_pr_ideals
{-5: [], -4: [], -3: [], -2: []}
Calling a key that is not in the input ``norm_list`` raises a KeyError::
sage: from sage.rings.number_field.bdd_height import bdd_norm_pr_gens_iq
sage: K.<g> = NumberField(x^2 + 20)
sage: L = range(100)
sage: bdd_pr_ideals = bdd_norm_pr_gens_iq(K, L)
sage: bdd_pr_ideals[100]
Traceback (most recent call last):
...
KeyError: 100
"""
return {n: K.elements_of_norm(n) for n in norm_list}
def bdd_height_iq(K, height_bound):
r"""
Compute all elements in the imaginary quadratic field `K` which have
relative multiplicative height at most ``height_bound``.
The function will only be called with `K` an imaginary quadratic field.
If called with `K` not an imaginary quadratic, the function will likely
yield incorrect output.
ALGORITHM:
This is an implementation of Algorithm 5 in [DK2013]_.
INPUT:
- `K` -- an imaginary quadratic number field
- ``height_bound`` -- a real number
OUTPUT:
- an iterator of number field elements
EXAMPLES::
sage: from sage.rings.number_field.bdd_height import bdd_height_iq
sage: K.<a> = NumberField(x^2 + 191)
sage: for t in bdd_height_iq(K,8):
....: print(exp(2*t.global_height()))
1.00000000000000
1.00000000000000
1.00000000000000
4.00000000000000
4.00000000000000
4.00000000000000
4.00000000000000
8.00000000000000
8.00000000000000
8.00000000000000
8.00000000000000
8.00000000000000
8.00000000000000
8.00000000000000
8.00000000000000
There are 175 elements of height at most 10 in `QQ(\sqrt(-3))`::
sage: from sage.rings.number_field.bdd_height import bdd_height_iq
sage: K.<a> = NumberField(x^2 + 3)
sage: len(list(bdd_height_iq(K,10)))
175
The only elements of multiplicative height 1 in a number field are 0 and
the roots of unity::
sage: from sage.rings.number_field.bdd_height import bdd_height_iq
sage: K.<a> = NumberField(x^2 + x + 1)
sage: list(bdd_height_iq(K,1))
[0, a + 1, a, -1, -a - 1, -a, 1]
A number field has no elements of multiplicative height less than 1::
sage: from sage.rings.number_field.bdd_height import bdd_height_iq
sage: K.<a> = NumberField(x^2 + 5)
sage: list(bdd_height_iq(K,0.9))
[]
"""
if height_bound < 1:
return
yield K(0)
roots_of_unity = K.roots_of_unity()
for zeta in roots_of_unity:
yield zeta
# Get a complete set of ideal class representatives
class_group_reps = []
class_group_rep_norms = []
for c in K.class_group():
a = c.ideal()
class_group_reps.append(a)
class_group_rep_norms.append(a.norm())
class_number = len(class_group_reps)
# Find principal ideals of bounded norm
possible_norm_set = set([])
for n in range(class_number):
for m in range(1, int(height_bound + 1)):
possible_norm_set.add(m*class_group_rep_norms[n])
bdd_ideals = bdd_norm_pr_gens_iq(K, possible_norm_set)
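    # Per Algorithm 5 of [DK2013]_, every x with H_K(x) <= B arises as a quotient g_i/g_j of
    # generators of principal ideals contained in some class representative a_l whose norms are
    # multiples m*N(a_l) with m <= B, which is why only the norms above need to be searched.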
# Distribute the principal ideals
generator_lists = []
for n in range(class_number):
this_ideal = class_group_reps[n]
this_ideal_norm = class_group_rep_norms[n]
gens = []
for i in range(1, int(height_bound + 1)):
for g in bdd_ideals[i*this_ideal_norm]:
if g in this_ideal:
gens.append(g)
generator_lists.append(gens)
# Build all the output numbers
for n in range(class_number):
gens = generator_lists[n]
s = len(gens)
for i in range(s):
for j in range(i + 1, s):
if K.ideal(gens[i], gens[j]) == class_group_reps[n]:
new_number = gens[i]/gens[j]
for zeta in roots_of_unity:
yield zeta * new_number
yield zeta / new_number
def bdd_norm_pr_ideal_gens(K, norm_list):
r"""
Compute generators for all principal ideals in a number field `K` whose
norms are in ``norm_list``.
INPUT:
- `K` -- a number field
- ``norm_list`` -- a list of positive integers
OUTPUT:
- a dictionary of number field elements, keyed by norm
EXAMPLES:
There is only one principal ideal of norm 1, and it is generated by the
element 1::
sage: from sage.rings.number_field.bdd_height import bdd_norm_pr_ideal_gens
sage: K.<g> = QuadraticField(101)
sage: bdd_norm_pr_ideal_gens(K, [1])
{1: [1]}
::
sage: from sage.rings.number_field.bdd_height import bdd_norm_pr_ideal_gens
sage: K.<g> = QuadraticField(123)
sage: bdd_norm_pr_ideal_gens(K, range(5))
{0: [0], 1: [1], 2: [-g - 11], 3: [], 4: [2]}
::
sage: from sage.rings.number_field.bdd_height import bdd_norm_pr_ideal_gens
sage: K.<g> = NumberField(x^5 - x + 19)
sage: b = bdd_norm_pr_ideal_gens(K, range(30))
sage: key = ZZ(28)
sage: b[key]
[157*g^4 - 139*g^3 - 369*g^2 + 848*g + 158, g^4 + g^3 - g - 7]
"""
negative_norm_units = K.elements_of_norm(-1)
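    # If a unit of norm -1 exists, every principal ideal of norm n has a generator of norm +n,
    # so searching norm n suffices; otherwise generators of norm -n must be collected as well.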
gens = {}
if not negative_norm_units:
for n in norm_list:
if not n:
gens[n] = [K.zero()]
else:
gens[n] = K.elements_of_norm(n) + K.elements_of_norm(-n)
else:
for n in norm_list:
gens[n] = K.elements_of_norm(n)
return gens
def integer_points_in_polytope(matrix, interval_radius):
r"""
Return the set of integer points in the polytope obtained by acting on a
cube by a linear transformation.
Given an r-by-r matrix ``matrix`` and a real number ``interval_radius``,
this function finds all integer lattice points in the polytope obtained by
transforming the cube [-interval_radius,interval_radius]^r via the linear
map induced by ``matrix``.
INPUT:
- ``matrix`` -- a square matrix of real numbers
- ``interval_radius`` -- a real number
OUTPUT:
- a list of tuples of integers
EXAMPLES:
Stretch the interval [-1,1] by a factor of 2 and find the integers in the
resulting interval::
sage: from sage.rings.number_field.bdd_height import integer_points_in_polytope
sage: m = matrix([2])
sage: r = 1
sage: integer_points_in_polytope(m,r)
[(-2), (-1), (0), (1), (2)]
Integer points inside a parallelogram::
sage: from sage.rings.number_field.bdd_height import integer_points_in_polytope
sage: m = matrix([[1, 2],[3, 4]])
sage: r = RealField()(1.3)
sage: integer_points_in_polytope(m,r)
[(-3, -7), (-2, -5), (-2, -4), (-1, -3), (-1, -2), (-1, -1), (0, -1), (0, 0), (0, 1), (1, 1), (1, 2), (1, 3), (2, 4), (2, 5), (3, 7)]
Integer points inside a parallelepiped::
sage: from sage.rings.number_field.bdd_height import integer_points_in_polytope
sage: m = matrix([[1.2,3.7,0.2],[-5.3,-.43,3],[1.2,4.7,-2.1]])
sage: r = 2.2
sage: L = integer_points_in_polytope(m,r)
sage: len(L)
4143
If ``interval_radius`` is 0, the output should include only the zero tuple::
sage: from sage.rings.number_field.bdd_height import integer_points_in_polytope
sage: m = matrix([[1,2,3,7],[4,5,6,2],[7,8,9,3],[0,3,4,5]])
sage: integer_points_in_polytope(m,0)
[(0, 0, 0, 0)]
"""
T = matrix
d = interval_radius
r = T.nrows()
# Find the vertices of the given box
box_vertices = [vector(x) for x in product([-d, d], repeat=r)]
# Transform the vertices
T_trans = T.transpose()
transformed_vertices = [v * T_trans for v in box_vertices]
# Create polyhedron from transformed vertices and find integer points inside
return list(Polyhedron(transformed_vertices, base_ring=QQ).integral_points())
def bdd_height(K, height_bound, tolerance=1e-2, precision=53):
r"""
Compute all elements in the number field `K` which have relative
multiplicative height at most ``height_bound``.
The function can only be called for number fields `K` with positive unit
rank. An error will occur if `K` is `QQ` or an imaginary quadratic field.
This algorithm computes 2 lists: L containing elements x in `K` such that
H_k(x) <= B, and a list L' containing elements x in `K` that, due to
floating point issues,
may be slightly larger then the bound. This can be controlled
by lowering the tolerance.
In current implementation both lists (L,L') are merged and returned in
form of iterator.
ALGORITHM:
This is an implementation of the revised algorithm (Algorithm 4) in
[DK2013]_.
INPUT:
- ``height_bound`` -- real number
- ``tolerance`` -- (default: 0.01) a rational number in (0,1]
- ``precision`` -- (default: 53) positive integer
OUTPUT:
- an iterator of number field elements
EXAMPLES:
There are no elements of negative height::
sage: from sage.rings.number_field.bdd_height import bdd_height
sage: K.<g> = NumberField(x^5 - x + 7)
sage: list(bdd_height(K,-3))
[]
The only nonzero elements of height 1 are the roots of unity::
sage: from sage.rings.number_field.bdd_height import bdd_height
sage: K.<g> = QuadraticField(3)
sage: list(bdd_height(K,1))
[0, -1, 1]
::
sage: from sage.rings.number_field.bdd_height import bdd_height
sage: K.<g> = QuadraticField(36865)
sage: len(list(bdd_height(K,101))) # long time (4 s)
131
::
sage: from sage.rings.number_field.bdd_height import bdd_height
sage: K.<g> = NumberField(x^6 + 2)
sage: len(list(bdd_height(K,60))) # long time (5 s)
1899
::
sage: from sage.rings.number_field.bdd_height import bdd_height
sage: K.<g> = NumberField(x^4 - x^3 - 3*x^2 + x + 1)
sage: len(list(bdd_height(K,10)))
99
TESTS:
Check that :trac:`22771` is fixed::
sage: from sage.rings.number_field.bdd_height import bdd_height
sage: K.<v> = NumberField(x^3 + x + 1)
sage: len(list(bdd_height(K,3)))
23
"""
# global values, used in internal function
B = height_bound
theta = tolerance
if B < 1:
return
embeddings = K.places(prec=precision)
O_K = K.ring_of_integers()
r1, r2 = K.signature()
r = r1 + r2 - 1
RF = RealField(precision)
lambda_gens_approx = {}
class_group_rep_norm_log_approx = []
unit_log_dict = {}
def rational_in(x, y):
r"""
Compute a rational number q, such that x<q<y using Archimedes' axiom
"""
z = y - x
if z == 0:
n = 1
else:
n = RR(1/z).ceil() + 1
        if RR(n*y).ceil() == n*y:  # if n*y is an integer, back off by one so that m/n < y strictly
m = n*y - 1
else:
m = RR(n*y).floor()
return m / n
def delta_approximation(x, delta):
r"""
Compute a rational number in range (x-delta, x+delta)
"""
return rational_in(x - delta, x + delta)
def vector_delta_approximation(v, delta):
r"""
Compute a rational vector w=(w1, ..., wn)
such that |vi-wi|<delta for all i in [1, n]
"""
return [delta_approximation(vi, delta) for vi in v]
def log_map(number):
r"""
Compute the image of an element of `K` under the logarithmic map.
"""
x = number
x_logs = []
for i in range(r1):
sigma = embeddings[i] # real embeddings
x_logs.append(sigma(x).abs().log())
for i in range(r1, r + 1):
tau = embeddings[i] # Complex embeddings
x_logs.append(2 * tau(x).abs().log())
return vector(x_logs)
def log_height_for_generators_approx(alpha, beta, Lambda):
r"""
Compute the rational approximation of logarithmic height function.
Return a lambda approximation h_K(alpha/beta)
"""
delta = Lambda / (r + 2)
norm_log = delta_approximation(RR(O_K.ideal(alpha, beta).norm()).log(), delta)
log_ga = vector_delta_approximation(log_map(alpha), delta)
log_gb = vector_delta_approximation(log_map(beta), delta)
arch_sum = sum([max(log_ga[k], log_gb[k]) for k in range(r + 1)])
return (arch_sum - norm_log)
def packet_height(n, pair, u):
r"""
Compute the height of the element of `K` encoded by a given packet.
"""
gens = generator_lists[n]
i = pair[0]
j = pair[1]
Log_gi = lambda_gens_approx[gens[i]]
Log_gj = lambda_gens_approx[gens[j]]
Log_u_gi = vector(Log_gi) + unit_log_dict[u]
arch_sum = sum([max(Log_u_gi[k], Log_gj[k]) for k in range(r + 1)])
return (arch_sum - class_group_rep_norm_log_approx[n])
# Step 1
# Computes ideal class representative and their rational approx norm
t = theta / (3*B)
delta_1 = t / (6*r+12)
class_group_reps = []
class_group_rep_norms = []
for c in K.class_group():
a = c.ideal()
a_norm = a.norm()
log_norm = RF(a_norm).log()
log_norm_approx = delta_approximation(log_norm, delta_1)
class_group_reps.append(a)
class_group_rep_norms.append(a_norm)
class_group_rep_norm_log_approx.append(log_norm_approx)
class_number = len(class_group_reps)
# Step 2
# Find generators for principal ideals of bounded norm
possible_norm_set = set([])
for n in range(class_number):
for m in range(1, (B + 1).ceil()):
possible_norm_set.add(m * class_group_rep_norms[n])
bdd_ideals = bdd_norm_pr_ideal_gens(K, possible_norm_set)
# Stores it in form of an dictionary and gives lambda(g)_approx for key g
for norm in possible_norm_set:
gens = bdd_ideals[norm]
for g in gens:
lambda_g_approx = vector_delta_approximation(log_map(g), delta_1)
lambda_gens_approx[g] = lambda_g_approx
# Step 3
# Find a list of all generators corresponding to each ideal a_l
generator_lists = []
for l in range(class_number):
this_ideal = class_group_reps[l]
this_ideal_norm = class_group_rep_norms[l]
gens = []
for i in range(1, (B + 1).ceil()):
for g in bdd_ideals[i * this_ideal_norm]:
if g in this_ideal:
gens.append(g)
generator_lists.append(gens)
# Step 4
# Finds all relevant pair and their height
gen_height_approx_dictionary = {}
relevant_pair_lists = []
for n in range(class_number):
relevant_pairs = []
gens = generator_lists[n]
l = len(gens)
for i in range(l):
for j in range(i+1, l):
if K.ideal(gens[i], gens[j]) == class_group_reps[n]:
relevant_pairs.append([i, j])
gen_height_approx_dictionary[(n, i, j)] = log_height_for_generators_approx(gens[i], gens[j], t/6)
relevant_pair_lists.append(relevant_pairs)
# Step 5
b = rational_in(t/12 + RR(B).log(), t/4 + RR(B).log())
maximum = 0
for n in range(class_number):
for p in relevant_pair_lists[n]:
maximum = max(maximum, gen_height_approx_dictionary[(n, p[0], p[1])])
d_tilde = b + t/6 + maximum
# Step 6
# computes fundamental units and their value under log map
fund_units = UnitGroup(K).fundamental_units()
fund_unit_logs = [log_map(fund_units[i]) for i in range(r)]
S = column_matrix(fund_unit_logs).delete_rows([r])
S_inverse = S.inverse()
S_norm = S.norm(Infinity)
S_inverse_norm = S_inverse.norm(Infinity)
upper_bound = (r**2) * max(S_norm, S_inverse_norm)
m = RR(upper_bound).ceil() + 1
# Step 7
# Variables needed for rational approximation
lambda_tilde = (t/12) / (d_tilde*r*(1+m))
delta_tilde = min(lambda_tilde/((r**2)*((m**2)+m*lambda_tilde)), 1/(r**2))
M = d_tilde * (upper_bound+lambda_tilde*RR(r).sqrt())
M = RR(M).ceil()
d_tilde = RR(d_tilde)
delta_2 = min(delta_tilde, (t/6)/(r*(r+1)*M))
# Step 8, 9
# Computes relevant points in polytope
fund_unit_log_approx = [vector_delta_approximation(fund_unit_logs[i], delta_2) for i in range(r)]
S_tilde = column_matrix(fund_unit_log_approx).delete_rows([r])
S_tilde_inverse = S_tilde.inverse()
U = integer_points_in_polytope(S_tilde_inverse, d_tilde)
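    # Each lattice point u in U encodes a candidate unit prod_k fund_units[k]**u[k]: the polytope
    # is (approximately) the image of the cube of radius d_tilde under the inverse of the unit-log
    # matrix, so U covers all unit exponent vectors of sufficiently small logarithmic height.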
# Step 10
# tilde suffixed list are used for computing second list (L_primed)
yield K(0)
U0 = []
U0_tilde = []
L0 = []
L0_tilde = []
# Step 11
# Computes unit height
unit_height_dict = {}
U_copy = copy(U)
inter_bound = b - (5*t)/12
for u in U:
u_log = sum([u[j]*vector(fund_unit_log_approx[j]) for j in range(r)])
unit_log_dict[u] = u_log
u_height = sum([max(u_log[k], 0) for k in range(r + 1)])
unit_height_dict[u] = u_height
if u_height < inter_bound:
U0.append(u)
if inter_bound <= u_height and u_height < b - (t/12):
U0_tilde.append(u)
if u_height > t/12 + d_tilde:
U_copy.remove(u)
U = U_copy
relevant_tuples = set(U0 + U0_tilde)
# Step 12
# check for relevant packets
for n in range(class_number):
for pair in relevant_pair_lists[n]:
i = pair[0]
j = pair[1]
u_height_bound = b + gen_height_approx_dictionary[(n, i, j)] + t/4
for u in U:
if unit_height_dict[u] < u_height_bound:
candidate_height = packet_height(n, pair, u)
if candidate_height <= b - 7*t/12:
L0.append([n, pair, u])
relevant_tuples.add(u)
elif candidate_height < b + t/4:
L0_tilde.append([n, pair, u])
relevant_tuples.add(u)
# Step 13
# forms a dictionary of all_unit_tuples and their value
tuple_to_unit_dict = {}
for u in relevant_tuples:
unit = K.one()
for k in range(r):
unit *= fund_units[k]**u[k]
tuple_to_unit_dict[u] = unit
# Step 14
# Build all output numbers
roots_of_unity = K.roots_of_unity()
for u in U0 + U0_tilde:
for zeta in roots_of_unity:
yield zeta * tuple_to_unit_dict[u]
# Step 15
for p in L0 + L0_tilde:
gens = generator_lists[p[0]]
i = p[1][0]
j = p[1][1]
u = p[2]
c_p = tuple_to_unit_dict[u] * (gens[i] / gens[j])
for zeta in roots_of_unity:
yield zeta * c_p
yield zeta / c_p
|
the-stack_0_8722 | # -*- coding: utf-8 -*-
""" HTTP API for triggering Earthstar events and
a simple web based controller that connects to the API.
Events are published to a ZeroMQ socket where they
are consumed by the EffectBox (and potentially other subscribers such
as an event logger).
"""
import click
from flask import Flask
from flask_bootstrap import Bootstrap
import zmq
from .blueprints import root
from .blueprints import effect_api
from .blueprints import controller
def create_effect_socket(effect_addr):
""" Create effect socket. """
context = zmq.Context()
effect_socket = context.socket(zmq.PUB)
effect_socket.bind(effect_addr)
return effect_socket
def create_webapp(effect_socket):
""" Create the Earthstar web application. """
app = Flask(__name__)
app.effect_socket = effect_socket
app.register_blueprint(root.root_bp)
app.register_blueprint(effect_api.effect_api_bp)
app.register_blueprint(controller.controller_bp)
Bootstrap(app)
return app
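# A subscriber (e.g. the EffectBox or an event logger) consumes published events over a matching
# ZeroMQ SUB socket; a minimal sketch, assuming the default address and no topic filtering
# (the actual message framing is defined by the effect_api blueprint):
#
#   context = zmq.Context()
#   sub = context.socket(zmq.SUB)
#   sub.connect("tcp://127.0.0.1:5555")
#   sub.setsockopt_string(zmq.SUBSCRIBE, "")
#   message = sub.recv()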
@click.command(context_settings={"auto_envvar_prefix": "ESC"})
@click.option(
'--host', default='localhost',
help='IP address to listen on.')
@click.option(
'--port', default=8080,
help='Port to listen on.')
@click.option(
'--effect-addr', default='tcp://127.0.0.1:5555',
help='ZeroMQ address to publish events to.')
@click.option(
'--debug/--no-debug', default=False,
help='Run with debug on or off.')
def main(host, port, effect_addr, debug):
""" Run the Earthstar effect API and web interface. """
effect_socket = create_effect_socket(effect_addr)
app = create_webapp(effect_socket)
app.run(host=host, port=port, debug=debug)
|
the-stack_0_8723 | from netCDF4 import Dataset
import numpy as np
import tables as tb
from glob import glob
import sys
MISSING_PBL = -1
def read_nc_data(in_file):
rootgrp = Dataset(in_file, "r", format="NETCDF4")
time_axis = rootgrp.variables['time']
height_axis = rootgrp.variables['range']
beta_raw = np.array(rootgrp.variables['beta_raw'])
result = {'time': np.array(time_axis),
'height': np.array(height_axis),
'beta_raw': np.array(beta_raw)}
# data_float = data_np.astype(float)
# data_float[data_np== MISSING_PBL ] = np.NaN
return result
def create_hdf_file(data_tables, out_file):
outfile = tb.open_file(out_file, 'w')
signal_group = outfile.create_group("/", 'raw_signal', 'Raw signal')
for name, data in data_tables.items():
if not data.dtype.fields:
desc = np.dtype([(name, 'f8')])
else:
desc = data.dtype
table = tb.Array(signal_group, name, data)
# table.append(data)
outfile.close()
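# Resulting HDF5 layout: one array per dictionary key under /raw_signal, i.e. /raw_signal/time,
# /raw_signal/height and /raw_signal/beta_raw as returned by read_nc_data().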
# def append_to_hdf_file( rec_data, out_file):
#
# outfile = tb.open_file(out_file, 'a')
#
# table = outfile.get_node("/pbl/PBL")
#
# table.append(rec_data)
#
# outfile.close()
in_files = sorted(glob('*_leipzig_CHM080079_000.nc'))
for in_file in in_files:
out_file = '.'.join(in_file.split('.')[:-1]) + ".h5"
create_hdf_file(read_nc_data(in_file), out_file)
|
the-stack_0_8724 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: Joseph Tarango
# *****************************************************************************/
from __future__ import absolute_import, division, print_function, unicode_literals # , nested_scopes, generators, generator_stop, with_statement, annotations
import re, sys, os, datetime, inspect, traceback, pprint
##############################################
# Python generic info
##############################################
"""
Requires
https://graphviz.gitlab.io/_pages/Download/windows/graphviz-2.38.msi
install path https://graphviz.gitlab.io/_pages/Download/windows/graphviz-2.38.msi
Add to path C:\\Program Files (x86)\\Graphviz2.38\\bin\\dot.exe
"""
# .exe extension patch for the compiled version of this script
if not re.search(pattern='\.PY$|\.PYC$|\.EXE$', string=os.path.split(sys.argv[0])[1].upper()):
sys.argv[0] = os.path.join(os.path.split(sys.argv[0])[0], os.path.split(sys.argv[0])[1] + '.exe')
##############################################
# Libraries
##############################################
def whoami(annotate=True):
frame = inspect.currentframe().f_back
fileName = inspect.getframeinfo(frame).filename
functionName = inspect.getframeinfo(frame).function
lineNumber = inspect.getframeinfo(frame).lineno
traceContext = pprint.pformat(traceback.format_exc(limit=None, chain=True))
if annotate:
fileName = ''.join(["File=", fileName])
functionName = ''.join(["Function=", functionName])
lineNumber = ''.join(["Line=", str(lineNumber)])
return fileName, functionName, lineNumber, traceContext
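# Hypothetical usage of whoami(): called inside some function it returns a tuple such as
#   ('File=/path/to/script.py', 'Function=main', 'Line=42', "'NoneType: None\n'")
# with the "File="/"Function="/"Line=" prefixes dropped when annotate=False.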
def devGraphAll(options, args):
##############################################
# Debug Graphing
##############################################
# import necessary paths
importPath = os.path.abspath(os.getcwd())
importPathNext = os.path.abspath(os.path.join(importPath, "pycallgraph"))
print("Importing Paths: ", str(importPath), str(importPathNext))
sys.path.insert(1, importPath)
sys.path.insert(1, importPathNext)
importPathNext = os.path.abspath(os.path.join(importPath, "pycallgraph", "output"))
print("Importing Paths: ", str(importPath), str(importPathNext))
sys.path.insert(1, importPath)
sys.path.insert(1, importPathNext)
try:
importPathNext = os.path.abspath('C:\\Program Files (x86)\\Graphviz2.38\\bin\\dot.exe')
print("Importing Paths: ", str(importPath), str(importPathNext))
sys.path.insert(1, importPath)
sys.path.insert(1, importPathNext)
except Exception as ErrorContext:
print(ErrorContext)
pass
status = 0
##############################################
# Library
##############################################
try:
from pycallgraph2 import PyCallGraph
from pycallgraph2.output import GraphvizOutput
from pycallgraph2 import Config
##############################################
# Configuration
##############################################
graphviz = GraphvizOutput()
graphviz.output_type = 'svg'
graphviz.output_file = 'pycallgraph.svg'
configList = Config()
configList.output = None
configList.verbose = True
configList.debug = False
configList.groups = True
configList.threaded = False
configList.max_depth = 2 ** 31
print(options, args)
with PyCallGraph(output=graphviz, config=configList):
callReturn = 1
print("PyCallGraphReturn", callReturn)
# status = testDrive(driveNumber) # Debug code goes here
    except Exception as errorContext:
        # Report why call-graph generation was skipped instead of failing silently.
        print("pycallgraph generation skipped:", errorContext)
return status
def main():
##############################################
# Main function, Options
##############################################
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--example", action='store_true', dest='example', default=False,
help='Show command execution example.')
parser.add_option("--debug", action='store_true', dest='debug', default=False, help='Debug mode.')
parser.add_option("--verbose", action='store_true', dest='verbose', default=False,
help='Verbose printing for debug use.')
(options, args) = parser.parse_args()
devGraphAll(options=options, args=args)
return 0
# Main Execute
if __name__ == '__main__':
p = datetime.datetime.now()
main()
q = datetime.datetime.now()
print("Execution time: " + str(q - p))
|
the-stack_0_8725 | import numpy as np
import os
import cv2
from .colors import get_color
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, c = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.c = c
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
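# Worked example: two unit squares offset by half a side overlap in a 0.5 x 1 strip, so
# bbox_iou gives 0.5 / (1 + 1 - 0.5) = 1/3.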
def draw_boxes(image, boxes, labels, obj_thresh, quiet=True):
flag = False
label_strs = ''
for box in boxes:
label_str = ''
label = -1
for i in range(len(labels)):
print("box.class[]:"+str(i)+" :"+ str(box.classes[i]))
if box.classes[i] > obj_thresh:
if label_str != '': label_str += ', '
label_str += (labels[i] + ' ' + str(round(box.get_score()*100, 2)) + '%')
label = i
if not quiet: print(label_str)
label_strs += label_str
if label >= 0:
text_size = cv2.getTextSize(label_str, cv2.FONT_HERSHEY_SIMPLEX, 1.1e-3 * image.shape[0], 5)
width, height = text_size[0][0], text_size[0][1]
region = np.array([[box.xmin-3, box.ymin],
[box.xmin-3, box.ymin-height-26],
[box.xmin+width+13, box.ymin-height-26],
[box.xmin+width+13, box.ymin]], dtype='int32')
cv2.rectangle(img=image, pt1=(box.xmin,box.ymin), pt2=(box.xmax,box.ymax), color=get_color(label), thickness=5)
cv2.fillPoly(img=image, pts=[region], color=get_color(label))
cv2.putText(img=image,
text=label_str,
org=(box.xmin+13, box.ymin - 13),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1e-3 * image.shape[0],
color=(0,0,0),
thickness=2)
flag = True
return image, flag, label_strs
|
the-stack_0_8727 | import unittest
from igraph import *
class DirectedUndirectedTests(unittest.TestCase):
def testToUndirected(self):
graph = Graph([(0,1), (0,2), (1,0)], directed=True)
graph2 = graph.copy()
graph2.to_undirected(mode=False)
self.assertTrue(graph2.vcount() == graph.vcount())
self.assertTrue(graph2.is_directed() == False)
self.assertTrue(sorted(graph2.get_edgelist()) == [(0,1), (0,1), (0,2)])
graph2 = graph.copy()
graph2.to_undirected()
self.assertTrue(graph2.vcount() == graph.vcount())
self.assertTrue(graph2.is_directed() == False)
self.assertTrue(sorted(graph2.get_edgelist()) == [(0,1), (0,2)])
graph2 = graph.copy()
graph2.es["weight"] = [1,2,3]
graph2.to_undirected(mode="collapse", combine_edges="sum")
self.assertTrue(graph2.vcount() == graph.vcount())
self.assertTrue(graph2.is_directed() == False)
self.assertTrue(sorted(graph2.get_edgelist()) == [(0,1), (0,2)])
self.assertTrue(graph2.es["weight"] == [4,2])
graph = Graph([(0,1),(1,0),(0,1),(1,0),(2,1),(1,2)], directed=True)
graph2 = graph.copy()
graph2.es["weight"] = [1,2,3,4,5,6]
graph2.to_undirected(mode="mutual", combine_edges="sum")
self.assertTrue(graph2.vcount() == graph.vcount())
self.assertTrue(graph2.is_directed() == False)
self.assertTrue(sorted(graph2.get_edgelist()) == [(0,1), (0,1), (1,2)])
self.assertTrue(graph2.es["weight"] == [7,3,11] or graph2.es["weight"] == [3,7,11])
def testToDirected(self):
graph = Graph([(0,1), (0,2), (2,3), (2,4)], directed=False)
graph.to_directed()
self.assertTrue(graph.is_directed())
self.assertTrue(graph.vcount() == 5)
self.assertTrue(sorted(graph.get_edgelist()) == \
[(0,1), (0,2), (1,0), (2,0), (2,3), (2,4), (3,2), (4,2)]
)
class GraphRepresentationTests(unittest.TestCase):
def testGetAdjacency(self):
# Undirected case
g = Graph.Tree(6, 3)
g.es["weight"] = range(5)
self.assertTrue(g.get_adjacency() == Matrix([
[0, 1, 1, 1, 0, 0],
[1, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0]
]))
self.assertTrue(g.get_adjacency(attribute="weight") == Matrix([
[0, 0, 1, 2, 0, 0],
[0, 0, 0, 0, 3, 4],
[1, 0, 0, 0, 0, 0],
[2, 0, 0, 0, 0, 0],
[0, 3, 0, 0, 0, 0],
[0, 4, 0, 0, 0, 0]
]))
self.assertTrue(g.get_adjacency(eids=True) == Matrix([
[0, 1, 2, 3, 0, 0],
[1, 0, 0, 0, 4, 5],
[2, 0, 0, 0, 0, 0],
[3, 0, 0, 0, 0, 0],
[0, 4, 0, 0, 0, 0],
[0, 5, 0, 0, 0, 0]
])-1)
# Directed case
g = Graph.Tree(6, 3, "tree_out")
g.add_edges([(0,1), (1,0)])
self.assertTrue(g.get_adjacency() == Matrix([
[0, 2, 1, 1, 0, 0],
[1, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]
]))
def suite():
direction_suite = unittest.makeSuite(DirectedUndirectedTests)
representation_suite = unittest.makeSuite(GraphRepresentationTests)
return unittest.TestSuite([direction_suite,
representation_suite])
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
|
the-stack_0_8728 | import pytest
import sdk_cmd
import sdk_install
import sdk_plan
from tests import config
@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
try:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
options = {
"service": {
"spec_file": "examples/discovery.yml"
}
}
sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME, 1, additional_options=options)
yield # let the test session execute
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.sanity
def test_task_dns_prefix_points_to_all_tasks():
pod_info = sdk_cmd.service_request('GET', config.SERVICE_NAME, '/v1/pod/hello-0/info').json()
# Assert that DiscoveryInfo is correctly set on tasks.
assert(all(p["info"]["discovery"]["name"] == "hello-0" for p in pod_info))
# Assert that the hello-0.hello-world.mesos DNS entry points to the right IP.
sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)
|
the-stack_0_8732 | # -*- coding: utf-8 -*-
'''
Module for returning various status data about a minion.
These data can be useful for compiling into stats later,
or for problem solving if your minion is having problems.
.. versionadded:: 0.12.0
:depends: - wmi
'''
# Import Python Libs
from __future__ import absolute_import, unicode_literals, print_function
import ctypes
import datetime
import logging
import subprocess
log = logging.getLogger(__name__)
# Import Salt Libs
import salt.utils.event
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.win_pdh
from salt.utils.network import host_to_ips as _host_to_ips
from salt.utils.functools import namespaced_function as _namespaced_function
# Import 3rd party Libs
from salt.ext import six
# These imports needed for namespaced functions
# pylint: disable=W0611
from salt.modules.status import ping_master, time_
import copy
# pylint: enable=W0611
# Import 3rd Party Libs
try:
if salt.utils.platform.is_windows():
import wmi
import salt.utils.winapi
HAS_WMI = True
else:
HAS_WMI = False
except ImportError:
HAS_WMI = False
HAS_PSUTIL = False
if salt.utils.platform.is_windows():
import psutil
HAS_PSUTIL = True
__opts__ = {}
__virtualname__ = 'status'
# Taken from https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/ex/sysinfo/performance.htm
class SYSTEM_PERFORMANCE_INFORMATION(ctypes.Structure):
_fields_ = [('IdleProcessTime', ctypes.c_int64),
('IoReadTransferCount', ctypes.c_int64),
('IoWriteTransferCount', ctypes.c_int64),
('IoOtherTransferCount', ctypes.c_int64),
('IoReadOperationCount', ctypes.c_ulong),
('IoWriteOperationCount', ctypes.c_ulong),
('IoOtherOperationCount', ctypes.c_ulong),
('AvailablePages', ctypes.c_ulong),
('CommittedPages', ctypes.c_ulong),
('CommitLimit', ctypes.c_ulong),
('PeakCommitment', ctypes.c_ulong),
('PageFaultCount', ctypes.c_ulong),
('CopyOnWriteCount', ctypes.c_ulong),
('TransitionCount', ctypes.c_ulong),
('CacheTransitionCount', ctypes.c_ulong),
('DemandZeroCount', ctypes.c_ulong),
('PageReadCount', ctypes.c_ulong),
('PageReadIoCount', ctypes.c_ulong),
('CacheReadCount', ctypes.c_ulong), # Was c_ulong ** 2
('CacheIoCount', ctypes.c_ulong),
('DirtyPagesWriteCount', ctypes.c_ulong),
('DirtyWriteIoCount', ctypes.c_ulong),
('MappedPagesWriteCount', ctypes.c_ulong),
('MappedWriteIoCount', ctypes.c_ulong),
('PagedPoolPages', ctypes.c_ulong),
('NonPagedPoolPages', ctypes.c_ulong),
('PagedPoolAllocs', ctypes.c_ulong),
('PagedPoolFrees', ctypes.c_ulong),
('NonPagedPoolAllocs', ctypes.c_ulong),
('NonPagedPoolFrees', ctypes.c_ulong),
('FreeSystemPtes', ctypes.c_ulong),
('ResidentSystemCodePage', ctypes.c_ulong),
('TotalSystemDriverPages', ctypes.c_ulong),
('TotalSystemCodePages', ctypes.c_ulong),
('NonPagedPoolLookasideHits', ctypes.c_ulong),
('PagedPoolLookasideHits', ctypes.c_ulong),
('AvailablePagedPoolPages', ctypes.c_ulong),
('ResidentSystemCachePage', ctypes.c_ulong),
('ResidentPagedPoolPage', ctypes.c_ulong),
('ResidentSystemDriverPage', ctypes.c_ulong),
('CcFastReadNoWait', ctypes.c_ulong),
('CcFastReadWait', ctypes.c_ulong),
('CcFastReadResourceMiss', ctypes.c_ulong),
('CcFastReadNotPossible', ctypes.c_ulong),
('CcFastMdlReadNoWait', ctypes.c_ulong),
('CcFastMdlReadWait', ctypes.c_ulong),
('CcFastMdlReadResourceMiss', ctypes.c_ulong),
('CcFastMdlReadNotPossible', ctypes.c_ulong),
('CcMapDataNoWait', ctypes.c_ulong),
('CcMapDataWait', ctypes.c_ulong),
('CcMapDataNoWaitMiss', ctypes.c_ulong),
('CcMapDataWaitMiss', ctypes.c_ulong),
('CcPinMappedDataCount', ctypes.c_ulong),
('CcPinReadNoWait', ctypes.c_ulong),
('CcPinReadWait', ctypes.c_ulong),
('CcPinReadNoWaitMiss', ctypes.c_ulong),
('CcPinReadWaitMiss', ctypes.c_ulong),
('CcCopyReadNoWait', ctypes.c_ulong),
('CcCopyReadWait', ctypes.c_ulong),
('CcCopyReadNoWaitMiss', ctypes.c_ulong),
('CcCopyReadWaitMiss', ctypes.c_ulong),
('CcMdlReadNoWait', ctypes.c_ulong),
('CcMdlReadWait', ctypes.c_ulong),
('CcMdlReadNoWaitMiss', ctypes.c_ulong),
('CcMdlReadWaitMiss', ctypes.c_ulong),
('CcReadAheadIos', ctypes.c_ulong),
('CcLazyWriteIos', ctypes.c_ulong),
('CcLazyWritePages', ctypes.c_ulong),
('CcDataFlushes', ctypes.c_ulong),
('CcDataPages', ctypes.c_ulong),
('ContextSwitches', ctypes.c_ulong),
('FirstLevelTbFills', ctypes.c_ulong),
('SecondLevelTbFills', ctypes.c_ulong),
('SystemCalls', ctypes.c_ulong),
# Windows 8 and above
('CcTotalDirtyPages', ctypes.c_ulonglong),
('CcDirtyPagesThreshold', ctypes.c_ulonglong),
('ResidentAvailablePages', ctypes.c_longlong),
# Windows 10 and above
('SharedCommittedPages', ctypes.c_ulonglong)]
def __virtual__():
'''
Only works on Windows systems with WMI and WinAPI
'''
if not salt.utils.platform.is_windows():
return False, 'win_status.py: Requires Windows'
if not HAS_WMI:
return False, 'win_status.py: Requires WMI and WinAPI'
if not HAS_PSUTIL:
return False, 'win_status.py: Requires psutil'
# Namespace modules from `status.py`
global ping_master, time_
ping_master = _namespaced_function(ping_master, globals())
time_ = _namespaced_function(time_, globals())
return __virtualname__
__func_alias__ = {
'time_': 'time'
}
def cpustats():
'''
Return information about the CPU.
Returns
dict: A dictionary containing information about the CPU stats
CLI Example:
.. code-block:: bash
        salt '*' status.cpustats
'''
# Tries to gather information similar to that returned by a Linux machine
# Avoid using WMI as there's a lot of overhead
# Time related info
user, system, idle, interrupt, dpc = psutil.cpu_times()
cpu = {'user': user,
'system': system,
'idle': idle,
'irq': interrupt,
'dpc': dpc}
# Count related info
ctx_switches, interrupts, soft_interrupts, sys_calls = psutil.cpu_stats()
intr = {'irqs': {'irqs': [],
'total': interrupts}}
soft_irq = {'softirqs': [],
'total': soft_interrupts}
return {'btime': psutil.boot_time(),
'cpu': cpu,
'ctxt': ctx_switches,
'intr': intr,
'processes': len(psutil.pids()),
'softirq': soft_irq,
'syscalls': sys_calls}
def meminfo():
'''
Return information about physical and virtual memory on the system
Returns:
dict: A dictionary of information about memory on the system
CLI Example:
.. code-block:: bash
        salt '*' status.meminfo
'''
# Get physical memory
vm_total, vm_available, vm_percent, vm_used, vm_free = psutil.virtual_memory()
# Get swap memory
swp_total, swp_used, swp_free, swp_percent, _, _ = psutil.swap_memory()
def get_unit_value(memory):
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i + 1) * 10
for s in reversed(symbols):
if memory >= prefix[s]:
value = float(memory) / prefix[s]
return {'unit': s,
'value': value}
return {'unit': 'B',
'value': memory}
return {'VmallocTotal': get_unit_value(vm_total),
'VmallocUsed': get_unit_value(vm_used),
'VmallocFree': get_unit_value(vm_free),
'VmallocAvail': get_unit_value(vm_available),
'SwapTotal': get_unit_value(swp_total),
'SwapUsed': get_unit_value(swp_used),
'SwapFree': get_unit_value(swp_free)}
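# Illustrative behaviour of the nested get_unit_value helper above (not from the original
# module): it walks the prefixes from largest to smallest, so for example
#   get_unit_value(1536) -> {'unit': 'K', 'value': 1.5}
#   get_unit_value(512)  -> {'unit': 'B', 'value': 512}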
def vmstats():
'''
Return information about the virtual memory on the machine
Returns:
dict: A dictionary of virtual memory stats
CLI Example:
.. code-block:: bash
        salt '*' status.vmstats
'''
# Setup the SPI Structure
spi = SYSTEM_PERFORMANCE_INFORMATION()
retlen = ctypes.c_ulong()
# 2 means to query System Performance Information and return it in a
# SYSTEM_PERFORMANCE_INFORMATION Structure
ctypes.windll.ntdll.NtQuerySystemInformation(
2, ctypes.byref(spi), ctypes.sizeof(spi), ctypes.byref(retlen))
# Return each defined field in a dict
ret = {}
for field in spi._fields_:
ret.update({field[0]: getattr(spi, field[0])})
return ret
def loadavg():
'''
Returns counter information related to the load of the machine
Returns:
dict: A dictionary of counters
CLI Example:
.. code-block:: bash
        salt '*' status.loadavg
'''
# Counter List (obj, instance, counter)
counter_list = [
('Memory', None, 'Available Bytes'),
('Memory', None, 'Pages/sec'),
('Paging File', '*', '% Usage'),
('Processor', '*', '% Processor Time'),
('Processor', '*', 'DPCs Queued/sec'),
('Processor', '*', '% Privileged Time'),
('Processor', '*', '% User Time'),
('Processor', '*', '% DPC Time'),
('Processor', '*', '% Interrupt Time'),
('Server', None, 'Work Item Shortages'),
('Server Work Queues', '*', 'Queue Length'),
('System', None, 'Processor Queue Length'),
('System', None, 'Context Switches/sec'),
]
return salt.utils.win_pdh.get_counters(counter_list=counter_list)
def cpuload():
'''
.. versionadded:: 2015.8.0
Return the processor load as a percentage
CLI Example:
.. code-block:: bash
salt '*' status.cpuload
'''
return psutil.cpu_percent()
def diskusage(human_readable=False, path=None):
'''
.. versionadded:: 2015.8.0
Return the disk usage for this minion
human_readable : False
If ``True``, usage will be in KB/MB/GB etc.
CLI Example:
.. code-block:: bash
salt '*' status.diskusage path=c:/salt
'''
if not path:
path = 'c:/'
disk_stats = psutil.disk_usage(path)
total_val = disk_stats.total
used_val = disk_stats.used
free_val = disk_stats.free
percent = disk_stats.percent
if human_readable:
total_val = _byte_calc(total_val)
used_val = _byte_calc(used_val)
free_val = _byte_calc(free_val)
return {'total': total_val,
'used': used_val,
'free': free_val,
'percent': percent}
def procs(count=False):
'''
Return the process data
count : False
If ``True``, this function will simply return the number of processes.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' status.procs
salt '*' status.procs count
'''
with salt.utils.winapi.Com():
wmi_obj = wmi.WMI()
processes = wmi_obj.win32_process()
    # This short-circuits the function to get a short, simple proc count.
if count:
return len(processes)
    # A proper run of the function, creating a nonsensically long output.
process_info = {}
for proc in processes:
process_info[proc.ProcessId] = _get_process_info(proc)
return process_info
def saltmem(human_readable=False):
'''
.. versionadded:: 2015.8.0
Returns the amount of memory that salt is using
human_readable : False
return the value in a nicely formatted number
CLI Example:
.. code-block:: bash
salt '*' status.saltmem
salt '*' status.saltmem human_readable=True
'''
# psutil.Process defaults to current process (`os.getpid()`)
p = psutil.Process()
# Use oneshot to get a snapshot
with p.oneshot():
mem = p.memory_info().rss
if human_readable:
return _byte_calc(mem)
return mem
def uptime(human_readable=False):
'''
.. versionadded:: 2015.8.0
Return the system uptime for the machine
Args:
human_readable (bool):
Return uptime in human readable format if ``True``, otherwise
return seconds. Default is ``False``
.. note::
Human readable format is ``days, hours:min:sec``. Days will only
be displayed if more than 0
Returns:
str:
The uptime in seconds or human readable format depending on the
value of ``human_readable``
CLI Example:
.. code-block:: bash
salt '*' status.uptime
salt '*' status.uptime human_readable=True
'''
# Get startup time
startup_time = datetime.datetime.fromtimestamp(psutil.boot_time())
# Subtract startup time from current time to get the uptime of the system
uptime = datetime.datetime.now() - startup_time
return six.text_type(uptime) if human_readable else uptime.total_seconds()
def _get_process_info(proc):
'''
Return process information
'''
cmd = salt.utils.stringutils.to_unicode(proc.CommandLine or '')
name = salt.utils.stringutils.to_unicode(proc.Name)
info = dict(
cmd=cmd,
name=name,
**_get_process_owner(proc)
)
return info
def _get_process_owner(process):
owner = {}
domain, error_code, user = None, None, None
try:
domain, error_code, user = process.GetOwner()
owner['user'] = salt.utils.stringutils.to_unicode(user)
owner['user_domain'] = salt.utils.stringutils.to_unicode(domain)
except Exception as exc:
pass
if not error_code and all((user, domain)):
owner['user'] = salt.utils.stringutils.to_unicode(user)
owner['user_domain'] = salt.utils.stringutils.to_unicode(domain)
elif process.ProcessId in [0, 4] and error_code == 2:
# Access Denied for System Idle Process and System
owner['user'] = 'SYSTEM'
owner['user_domain'] = 'NT AUTHORITY'
else:
log.warning('Error getting owner of process; PID=\'%s\'; Error: %s',
process.ProcessId, error_code)
return owner
def _byte_calc(val):
if val < 1024:
tstr = six.text_type(val)+'B'
    elif val < 1048576:
        tstr = six.text_type(val/1024)+'KB'
    elif val < 1073741824:
        tstr = six.text_type(val/1048576)+'MB'
elif val < 1099511627776:
tstr = six.text_type(val/1073741824)+'GB'
else:
tstr = six.text_type(val/1099511627776)+'TB'
return tstr
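# Illustrative output of _byte_calc under Python 3 true division (not part of the original
# module): _byte_calc(512) -> '512B', _byte_calc(2048) -> '2.0KB'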
def master(master=None, connected=True):
'''
.. versionadded:: 2015.5.0
Fire an event if the minion gets disconnected from its master. This
function is meant to be run via a scheduled job from the minion. If
master_ip is an FQDN/Hostname, is must be resolvable to a valid IPv4
address.
CLI Example:
.. code-block:: bash
salt '*' status.master
'''
def _win_remotes_on(port):
'''
Windows specific helper function.
Returns set of ipv4 host addresses of remote established connections
on local or remote tcp port.
Parses output of shell 'netstat' to get connections
PS C:> netstat -n -p TCP
Active Connections
Proto Local Address Foreign Address State
TCP 10.1.1.26:3389 10.1.1.1:4505 ESTABLISHED
TCP 10.1.1.26:56862 10.1.1.10:49155 TIME_WAIT
TCP 10.1.1.26:56868 169.254.169.254:80 CLOSE_WAIT
TCP 127.0.0.1:49197 127.0.0.1:49198 ESTABLISHED
TCP 127.0.0.1:49198 127.0.0.1:49197 ESTABLISHED
'''
remotes = set()
try:
data = subprocess.check_output(['netstat', '-n', '-p', 'TCP']) # pylint: disable=minimum-python-version
except subprocess.CalledProcessError:
log.error('Failed netstat')
raise
lines = salt.utils.stringutils.to_unicode(data).split('\n')
for line in lines:
if 'ESTABLISHED' not in line:
continue
chunks = line.split()
remote_host, remote_port = chunks[2].rsplit(':', 1)
if int(remote_port) != port:
continue
remotes.add(remote_host)
return remotes
# the default publishing port
port = 4505
master_ips = None
if master:
master_ips = _host_to_ips(master)
if not master_ips:
return
if __salt__['config.get']('publish_port') != '':
port = int(__salt__['config.get']('publish_port'))
master_connection_status = False
connected_ips = _win_remotes_on(port)
# Get connection status for master
for master_ip in master_ips:
if master_ip in connected_ips:
master_connection_status = True
break
# Connection to master is not as expected
if master_connection_status is not connected:
with salt.utils.event.get_event('minion', opts=__opts__, listen=False) as event_bus:
if master_connection_status:
event_bus.fire_event({'master': master}, salt.minion.master_event(type='connected'))
else:
event_bus.fire_event({'master': master}, salt.minion.master_event(type='disconnected'))
return master_connection_status
|
the-stack_0_8734 |
from bentoml import BentoService, api, env, artifacts
from bentoml.artifact import PickleArtifact
from bentoml.adapters import FileInput
@artifacts([PickleArtifact('model')])
@env(pip_dependencies=['easyocr'],
conda_channels=["conda-forge"],
conda_dependencies=["ruamel.yaml"])
class TextDetectionService(BentoService):
@api(input=FileInput())
def predict(self, image):
result = self.artifacts.model.detect_text(image[0])
return result
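# Rough packaging sketch, assuming the BentoML 0.x BentoService API implied by the imports
# above; `my_easyocr_model` is a placeholder object, not defined in the original file:
#   svc = TextDetectionService()
#   svc.pack('model', my_easyocr_model)
#   saved_path = svc.save()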
|
the-stack_0_8735 | """
modify a file with additions or substitutions, making as few other changes
as possible (no formatting, whitespace, or encoding changes)
Authors:
Carl Anderson ([email protected])
"""
import os
import logging
class FileModifier:
"""
    class that modifies a file with additions or substitutions, doing so
    while making as few other changes as possible (no formatting, whitespace, or encoding changes)
"""
COMMENT = "# programmatically added by LookML modifier"
def __init__(self, filename):
"""initialize the FileModifier
Args:
filename (str): filename
"""
if not os.path.exists(filename):
raise IOError("Filename does not exist: %s" % filename)
logging.info("Reading in file %s", filename)
self.lines = open(filename, "r").readlines()
def is_header(self, line, header_type, header_name):
"""looking for start of dimension or header, e.g.
"dimension: header_name {"
Args:
line (str): line from a file
header_type (str): e.g. dimension
header_name (str): e.g. header_name (in example above)
Returns:
bool: is this chunk a header?
"""
start = header_type + ":"
# FIXME this assumes brace is on same line. Valid LookML means that it doesn't have to be
if (
line.strip().startswith(start)
and line.split(start)[1].split("{")[0].strip() == header_name
):
return True
return False
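    # Illustrative use of is_header (not in the original source); it assumes the opening
    # brace sits on the same line as the header, per the FIXME above:
    #   self.is_header('dimension: revenue {', 'dimension', 'revenue')      -> True
    #   self.is_header('dimension: revenue_usd {', 'dimension', 'revenue')  -> False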
def handle_description_addition(self, definition_type, name, description):
"""add in a new description
Args:
definition_type (str): 'measure' or 'dimension'
name (str): name of measure or dimension
description (str): description to add
Returns:
nothing. Side effect is to add lines to self.lines
"""
new_lines = []
for line in self.lines:
if self.is_header(line, definition_type, name):
line_to_add = ' description: "%s"\t%s\n' % (
description,
FileModifier.COMMENT,
)
logging.info("Adding in line: %s" % line_to_add)
new_lines.append(line) # header
new_lines.append(line_to_add)
else:
new_lines.append(line)
self.lines = new_lines
    def handle_description_substitution(
self, num_lines, definition_type, name, description
):
"""as description exists, we need to find the header, then look for description after it,
consume all the lines of the current description, and add the new description
Args:
num_lines (int): number of lines in the existing description
definition_type (str): 'measure' or 'dimension'
name (str): name of measure or dimension
description (str): description to add
Returns:
Nothing. Side effect to save to self.lines
"""
new_lines = []
iterator = iter(self.lines)
while iterator:
try:
line = next(iterator)
if self.is_header(line, definition_type, name):
new_lines.append(line)
ct = 0
while True:
line = next(iterator)
ct += 1
if line.strip().startswith("description"):
logging.info("found description %d lines after header", ct)
# consume the other lines for this existing description
for i in range(num_lines):
line = next(iterator)
# inject our new description
line_to_add = ' description: "%s"\t%s\n' % (
description,
FileModifier.COMMENT,
)
logging.info("Adding in line: %s", line_to_add)
new_lines.append(line_to_add)
break
else:
new_lines.append(line)
new_lines.append(line)
except StopIteration:
break
self.lines = new_lines
def modify(self, num_lines, definition_type, name, description, has_key):
"""
modify an entry
Args:
            num_lines (int): number of lines to substitute
            definition_type (str): 'measure' or 'dimension'
            name (str): name of dimension, dimension_group, or measure
            description (str): correct description
            has_key (bool): whether the definition already has a description key
Returns:
nothing. Side effect is to update self.lines with correct info
"""
if not has_key:
self.handle_description_addition(definition_type, name, description)
else:
            self.handle_description_substitution(
num_lines, definition_type, name, description
)
def write(self, filename):
"""write modified LookML to filename
Args:
filename (str): filepath of file to write to
Returns:
nothing. Side effect is to write data to file
"""
logging.info("Writing LookML to %s" % filename)
with open(filename, "w") as the_file:
for line in self.lines:
the_file.write(line)
|
the-stack_0_8736 | import datetime
import json
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, redirect
from compose.models import DailyEntry, get_current_streak, get_longest_streak
@login_required
def fetch(request):
today = datetime.date.today()
user = request.user
entry = DailyEntry.objects.today(user=user)
total_word_count = sum(e.word_count
for e in DailyEntry.objects.filter(user=user, date__lt=today))
word_count_this_month = sum(e.word_count
for e in DailyEntry.objects.filter(user=user, date__month=today.month))
response = {
'longest_streak': get_longest_streak(user),
'streak_length': get_current_streak(user),
'text': entry.text,
'total_word_count': total_word_count,
'word_count': entry.word_count,
'word_count_goal': entry.word_count_goal,
'word_count_this_month': word_count_this_month,
}
return JsonResponse(response)
@login_required
def update(request):
if request.method == 'POST':
try:
obj = json.loads(request.body.decode('utf-8'))
except (json.JSONDecodeError, UnicodeDecodeError):
            return HttpResponse(status=400)
text = obj.get('text')
word_count_goal = obj.get('word_count_goal')
entry = DailyEntry.objects.today(user=request.user)
if text is not None:
entry.text = text
if word_count_goal is not None:
entry.word_count_goal = word_count_goal
entry.save()
return HttpResponse()
else:
return redirect('compose:index')
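# Example payload the update view accepts (illustrative only; the URL route is defined
# elsewhere and both keys are optional):
#   POST body: {"text": "Today I wrote...", "word_count_goal": 500}
# Only the fields present in the JSON are written to today's DailyEntry.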
|
the-stack_0_8738 | import pandas as pd
pd.DataFrame()
class RailwayForm:
formType = "RailwayForm"
def printData(self):
print(f"Name is {self.name}")
print(f"Train is {self.train}")
harrysApplication = RailwayForm()
harrysApplication.name = "Harry"
harrysApplication.train = "Rajdhani Express"
harrysApplication.printData() |
the-stack_0_8739 | import argparse
from argparse import ArgumentParser, Namespace
from typing import Any, Dict, Optional
from emmental.utils.utils import (
nullable_float,
nullable_int,
nullable_string,
str2bool,
str2dict,
)
def parse_args(parser: Optional[ArgumentParser] = None) -> ArgumentParser:
r"""Parse the configuration from command line.
Args:
      parser(ArgumentParser): The external argument parser object, defaults to None.
Returns:
ArgumentParser: The updated argument parser object.
"""
if parser is None:
parser = argparse.ArgumentParser(
"Emmental configuration",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Load meta configuration
meta_config = parser.add_argument_group("Meta configuration")
meta_config.add_argument(
"--seed",
type=nullable_int,
default=None,
help="Random seed for all numpy/torch/cuda operations in model and learning",
)
meta_config.add_argument(
"--verbose",
type=str2bool,
default=True,
help="Whether to print the log information",
)
meta_config.add_argument(
"--log_path", type=str, default="logs", help="Directory to save running log"
)
# Load data configuration
data_config = parser.add_argument_group("Data configuration")
data_config.add_argument(
"--min_data_len", type=int, default=0, help="Minimal data length"
)
data_config.add_argument(
"--max_data_len",
type=int,
default=0,
help="Maximal data length (0 for no max_len)",
)
# Load model configuration
model_config = parser.add_argument_group("Model configuration")
model_config.add_argument(
"--model_path",
type=nullable_string,
default=None,
help="Path to pretrained model",
)
model_config.add_argument(
"--device",
type=int,
default=0,
help="Which device to use (-1 for cpu or gpu id (e.g., 0 for cuda:0))",
)
model_config.add_argument(
"--dataparallel",
type=str2bool,
default=True,
help="Whether to use dataparallel or not",
)
# Learning configuration
learner_config = parser.add_argument_group("Learning configuration")
learner_config.add_argument(
"--fp16",
type=str2bool,
default=False,
help="Whether to use half precision to train",
)
learner_config.add_argument(
"--n_epochs", type=int, default=1, help="Total number of learning epochs"
)
learner_config.add_argument(
"--train_split",
nargs="+",
type=str,
default=["train"],
help="The split for training",
)
learner_config.add_argument(
"--valid_split",
nargs="+",
type=str,
default=["valid"],
help="The split for validation",
)
learner_config.add_argument(
"--test_split",
nargs="+",
type=str,
default=["test"],
help="The split for testing",
)
learner_config.add_argument(
"--ignore_index",
type=nullable_int,
default=None,
help="The ignore index, uses for masking samples",
)
# Optimizer configuration
optimizer_config = parser.add_argument_group("Optimizer configuration")
optimizer_config.add_argument(
"--optimizer",
type=nullable_string,
default="adam",
choices=[
"asgd",
"adadelta",
"adagrad",
"adam",
"adamw",
"adamax",
"lbfgs",
"rms_prop",
"r_prop",
"sgd",
"sparse_adam",
"bert_adam",
None,
],
help="The optimizer to use",
)
optimizer_config.add_argument("--lr", type=float, default=1e-3, help="Learing rate")
optimizer_config.add_argument(
"--l2", type=float, default=0.0, help="l2 regularization"
)
optimizer_config.add_argument(
"--grad_clip", type=nullable_float, default=None, help="Gradient clipping"
)
# ASGD config
optimizer_config.add_argument(
"--asgd_lambd", type=float, default=0.0001, help="ASGD lambd"
)
optimizer_config.add_argument(
"--asgd_alpha", type=float, default=0.75, help="ASGD alpha"
)
optimizer_config.add_argument(
"--asgd_t0", type=float, default=1000000.0, help="ASGD t0"
)
# Adadelta config
optimizer_config.add_argument(
"--adadelta_rho", type=float, default=0.9, help="Adadelta rho"
)
optimizer_config.add_argument(
"--adadelta_eps", type=float, default=0.000001, help="Adadelta eps"
)
# Adagrad config
optimizer_config.add_argument(
"--adagrad_lr_decay", type=float, default=0, help="Adagrad lr_decay"
)
optimizer_config.add_argument(
"--adagrad_initial_accumulator_value",
type=float,
default=0,
help="Adagrad initial accumulator value",
)
optimizer_config.add_argument(
"--adagrad_eps", type=float, default=0.0000000001, help="Adagrad eps"
)
# Adam config
optimizer_config.add_argument(
"--adam_betas", nargs="+", type=float, default=(0.9, 0.999), help="Adam betas"
)
optimizer_config.add_argument(
"--adam_eps", type=float, default=1e-8, help="Adam eps"
)
optimizer_config.add_argument(
"--adam_amsgrad",
type=str2bool,
default=False,
help="Whether to use the AMSGrad variant of adam",
)
# AdamW config
optimizer_config.add_argument(
"--adamw_betas", nargs="+", type=float, default=(0.9, 0.999), help="AdamW betas"
)
optimizer_config.add_argument(
"--adamw_eps", type=float, default=1e-8, help="AdamW eps"
)
optimizer_config.add_argument(
"--adamw_amsgrad",
type=str2bool,
default=False,
help="Whether to use the AMSGrad variant of AdamW",
)
# Adamax config
optimizer_config.add_argument(
"--adamax_betas",
nargs="+",
type=float,
default=(0.9, 0.999),
help="Adamax betas",
)
optimizer_config.add_argument(
"--adamax_eps", type=float, default=1e-8, help="Adamax eps"
)
# LBFGS config
optimizer_config.add_argument(
"--lbfgs_max_iter", type=int, default=20, help="LBFGS max iter"
)
optimizer_config.add_argument(
"--lbfgs_max_eval", type=nullable_int, default=None, help="LBFGS max eval"
)
optimizer_config.add_argument(
"--lbfgs_tolerance_grad", type=float, default=1e-07, help="LBFGS tolerance grad"
)
optimizer_config.add_argument(
"--lbfgs_tolerance_change",
type=float,
default=1e-09,
help="LBFGS tolerance change",
)
optimizer_config.add_argument(
"--lbfgs_history_size", type=int, default=100, help="LBFGS history size"
)
optimizer_config.add_argument(
"--lbfgs_line_search_fn",
type=nullable_string,
default=None,
help="LBFGS line search fn",
)
# RMSprop config
optimizer_config.add_argument(
"--rms_prop_alpha", type=float, default=0.99, help="RMSprop alpha"
)
optimizer_config.add_argument(
"--rms_prop_eps", type=float, default=1e-08, help="RMSprop eps"
)
optimizer_config.add_argument(
"--rms_prop_momentum", type=float, default=0, help="RMSprop momentum"
)
optimizer_config.add_argument(
"--rms_prop_centered", type=str2bool, default=False, help="RMSprop centered"
)
# Rprop config
optimizer_config.add_argument(
"--r_prop_etas", nargs="+", type=float, default=(0.5, 1.2), help="Rprop etas"
)
optimizer_config.add_argument(
"--r_prop_step_sizes",
nargs="+",
type=float,
default=(1e-06, 50),
help="Rprop step sizes",
)
# SGD config
optimizer_config.add_argument(
"--sgd_momentum", type=float, default=0, help="SGD momentum"
)
optimizer_config.add_argument(
"--sgd_dampening", type=float, default=0, help="SGD dampening"
)
optimizer_config.add_argument(
"--sgd_nesterov", type=str2bool, default=False, help="SGD nesterov"
)
# SparseAdam config
optimizer_config.add_argument(
"--sparse_adam_betas",
nargs="+",
type=float,
default=(0.9, 0.999),
help="SparseAdam betas",
)
optimizer_config.add_argument(
"--sparse_adam_eps", type=float, default=1e-08, help="SparseAdam eps"
)
# BertAdam config
optimizer_config.add_argument(
"--bert_adam_betas",
nargs="+",
type=float,
default=(0.9, 0.999),
help="BertAdam betas",
)
optimizer_config.add_argument(
"--bert_adam_eps", type=float, default=1e-08, help="BertAdam eps"
)
# Scheduler configuration
scheduler_config = parser.add_argument_group("Scheduler configuration")
scheduler_config.add_argument(
"--lr_scheduler",
type=nullable_string,
default=None,
choices=[
"linear",
"exponential",
"plateau",
"step",
"multi_step",
"cyclic",
"one_cycle",
"cosine_annealing",
],
help="Learning rate scheduler",
)
scheduler_config.add_argument(
"--lr_scheduler_step_unit",
type=str,
default="batch",
choices=["batch", "epoch"],
help="Learning rate scheduler step unit",
)
scheduler_config.add_argument(
"--lr_scheduler_step_freq",
type=int,
default=1,
help="Learning rate scheduler step freq",
)
scheduler_config.add_argument(
"--warmup_steps", type=float, default=None, help="Warm up steps"
)
scheduler_config.add_argument(
"--warmup_unit",
type=str,
default="batch",
choices=["batch", "epoch"],
help="Warm up unit",
)
scheduler_config.add_argument(
"--warmup_percentage", type=float, default=None, help="Warm up percentage"
)
scheduler_config.add_argument(
"--min_lr", type=float, default=0.0, help="Minimum learning rate"
)
scheduler_config.add_argument(
"--exponential_lr_scheduler_gamma",
type=float,
default=0.9,
help="Gamma for exponential lr scheduler",
)
# ReduceLROnPlateau lr scheduler config
scheduler_config.add_argument(
"--plateau_lr_scheduler_metric",
type=str,
default="model/train/all/loss",
help="Metric of plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_mode",
type=str,
default="min",
choices=["min", "max"],
help="Mode of plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_factor",
type=float,
default=0.1,
help="Factor of plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_patience",
type=int,
default=10,
help="Patience for plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_threshold",
type=float,
default=0.0001,
help="Threshold of plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_threshold_mode",
type=str,
default="rel",
choices=["rel", "abs"],
help="Threshold mode of plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_cooldown",
type=int,
default=0,
help="Cooldown of plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_eps",
type=float,
default=0.00000001,
help="Eps of plateau lr scheduler",
)
# Step lr scheduler config
scheduler_config.add_argument(
"--step_lr_scheduler_step_size",
type=int,
default=1,
help="Period of learning rate decay",
)
scheduler_config.add_argument(
"--step_lr_scheduler_gamma",
type=float,
default=0.1,
help="Multiplicative factor of learning rate decay",
)
scheduler_config.add_argument(
"--step_lr_scheduler_last_epoch",
type=int,
default=-1,
help="The index of last epoch",
)
scheduler_config.add_argument(
"--multi_step_lr_scheduler_milestones",
nargs="+",
type=int,
default=[1000],
help="List of epoch indices. Must be increasing.",
)
scheduler_config.add_argument(
"--multi_step_lr_scheduler_gamma",
type=float,
default=0.1,
help="Multiplicative factor of learning rate decay",
)
scheduler_config.add_argument(
"--multi_step_lr_scheduler_last_epoch",
type=int,
default=-1,
help="The index of last epoch",
)
# Cyclic lr scheduler config
scheduler_config.add_argument(
"--cyclic_lr_scheduler_base_lr",
nargs="+",
type=float,
default=0.001,
help="Base lr of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_max_lr",
nargs="+",
type=float,
default=0.1,
help="Max lr of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_step_size_up",
type=int,
default=2000,
help="Step size up of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_step_size_down",
type=nullable_int,
default=None,
help="Step size down of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_mode",
type=nullable_string,
default="triangular",
help="Mode of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_gamma",
type=float,
default=1.0,
help="Gamma of cyclic lr scheduler",
)
# TODO: support cyclic_lr_scheduler_scale_fn
scheduler_config.add_argument(
"--cyclic_lr_scheduler_scale_mode",
type=str,
default="cycle",
choices=["cycle", "iterations"],
help="Scale mode of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_cycle_momentum",
type=str2bool,
default=True,
help="Cycle momentum of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_base_momentum",
nargs="+",
type=float,
default=0.8,
help="Base momentum of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_max_momentum",
nargs="+",
type=float,
default=0.9,
help="Max momentum of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_last_epoch",
type=int,
default=-1,
help="Last epoch of cyclic lr scheduler",
)
# One cycle lr scheduler config
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_max_lr",
nargs="+",
type=float,
default=0.1,
help="Max lr of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_pct_start",
type=float,
default=0.3,
help="Percentage start of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_anneal_strategy",
type=str,
default="cos",
choices=["cos", "linear"],
help="Anneal strategyr of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_cycle_momentum",
type=str2bool,
default=True,
help="Cycle momentum of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_base_momentum",
nargs="+",
type=float,
default=0.85,
help="Base momentum of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_max_momentum",
nargs="+",
type=float,
default=0.95,
help="Max momentum of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_div_factor",
type=float,
default=25,
help="Div factor of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_final_div_factor",
type=float,
default=1e4,
help="Final div factor of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_last_epoch",
type=int,
default=-1,
help="Last epoch of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cosine_annealing_lr_scheduler_last_epoch",
type=int,
default=-1,
help="The index of last epoch",
)
scheduler_config.add_argument(
"--task_scheduler",
type=str,
default="round_robin",
# choices=["sequential", "round_robin", "mixed"],
help="Task scheduler",
)
scheduler_config.add_argument(
"--sequential_scheduler_fillup",
type=str2bool,
default=False,
help="Whether fillup in sequential scheduler",
)
scheduler_config.add_argument(
"--round_robin_scheduler_fillup",
type=str2bool,
default=False,
help="whether fillup in round robin scheduler",
)
scheduler_config.add_argument(
"--mixed_scheduler_fillup",
type=str2bool,
default=False,
help="whether fillup in mixed scheduler scheduler",
)
# Logging configuration
logging_config = parser.add_argument_group("Logging configuration")
logging_config.add_argument(
"--counter_unit",
type=str,
default="epoch",
choices=["epoch", "batch"],
help="Logging unit (epoch, batch)",
)
logging_config.add_argument(
"--evaluation_freq", type=float, default=1, help="Logging evaluation frequency"
)
logging_config.add_argument(
"--writer",
type=str,
default="tensorboard",
choices=["json", "tensorboard"],
help="The writer format (json, tensorboard)",
)
logging_config.add_argument(
"--checkpointing",
type=str2bool,
default=False,
help="Whether to checkpoint the model",
)
logging_config.add_argument(
"--checkpoint_path", type=str, default=None, help="Checkpointing path"
)
logging_config.add_argument(
"--checkpoint_freq",
type=int,
default=1,
help="Checkpointing every k logging time",
)
logging_config.add_argument(
"--checkpoint_metric",
type=str2dict,
default={"model/train/all/loss": "min"},
help=(
"Checkpointing metric (metric_name:mode), "
"e.g., `model/train/all/loss:min`"
),
)
logging_config.add_argument(
"--checkpoint_task_metrics",
type=str2dict,
default=None,
help=(
"Task specific checkpointing metric "
"(metric_name1:mode1,metric_name2:mode2)"
),
)
logging_config.add_argument(
"--checkpoint_runway",
type=float,
default=0,
help="Checkpointing runway (no checkpointing before k checkpointing unit)",
)
logging_config.add_argument(
"--clear_intermediate_checkpoints",
type=str2bool,
default=True,
help="Whether to clear intermediate checkpoints",
)
logging_config.add_argument(
"--clear_all_checkpoints",
type=str2bool,
default=False,
help="Whether to clear all checkpoints",
)
return parser
def parse_args_to_config(args: Namespace) -> Dict[str, Any]:
r"""Parse the arguments to config dict
Args:
args(Namespace): The parsed namespace from argument parser.
Returns:
dict: The config dict.
"""
config = {
"meta_config": {
"seed": args.seed,
"verbose": args.verbose,
"log_path": args.log_path,
},
"data_config": {
"min_data_len": args.min_data_len,
"max_data_len": args.max_data_len,
},
"model_config": {
"model_path": args.model_path,
"device": args.device,
"dataparallel": args.dataparallel,
},
"learner_config": {
"fp16": args.fp16,
"n_epochs": args.n_epochs,
"train_split": args.train_split,
"valid_split": args.valid_split,
"test_split": args.test_split,
"ignore_index": args.ignore_index,
"optimizer_config": {
"optimizer": args.optimizer,
"lr": args.lr,
"l2": args.l2,
"grad_clip": args.grad_clip,
"asgd_config": {
"lambd": args.asgd_lambd,
"alpha": args.asgd_alpha,
"t0": args.asgd_t0,
},
"adadelta_config": {"rho": args.adadelta_rho, "eps": args.adadelta_eps},
"adagrad_config": {
"lr_decay": args.adagrad_lr_decay,
"initial_accumulator_value": args.adagrad_initial_accumulator_value,
"eps": args.adagrad_eps,
},
"adam_config": {
"betas": args.adam_betas,
"amsgrad": args.adam_amsgrad,
"eps": args.adam_eps,
},
"adamw_config": {
"betas": args.adamw_betas,
"amsgrad": args.adamw_amsgrad,
"eps": args.adamw_eps,
},
"adamax_config": {"betas": args.adamax_betas, "eps": args.adamax_eps},
"lbfgs_config": {
"max_iter": args.lbfgs_max_iter,
"max_eval": args.lbfgs_max_eval,
"tolerance_grad": args.lbfgs_tolerance_grad,
"tolerance_change": args.lbfgs_tolerance_change,
"history_size": args.lbfgs_history_size,
"line_search_fn": args.lbfgs_line_search_fn,
},
"rms_prop_config": {
"alpha": args.rms_prop_alpha,
"eps": args.rms_prop_eps,
"momentum": args.rms_prop_momentum,
"centered": args.rms_prop_centered,
},
"r_prop_config": {
"etas": args.r_prop_etas,
"step_sizes": args.r_prop_step_sizes,
},
"sgd_config": {
"momentum": args.sgd_momentum,
"dampening": args.sgd_dampening,
"nesterov": args.sgd_nesterov,
},
"sparse_adam_config": {
"betas": args.sparse_adam_betas,
"eps": args.sparse_adam_eps,
},
"bert_adam_config": {
"betas": args.bert_adam_betas,
"eps": args.bert_adam_eps,
},
},
"lr_scheduler_config": {
"lr_scheduler": args.lr_scheduler,
"lr_scheduler_step_unit": args.lr_scheduler_step_unit,
"lr_scheduler_step_freq": args.lr_scheduler_step_freq,
"warmup_steps": args.warmup_steps,
"warmup_unit": args.warmup_unit,
"warmup_percentage": args.warmup_percentage,
"min_lr": args.min_lr,
"exponential_config": {"gamma": args.exponential_lr_scheduler_gamma},
"plateau_config": {
"metric": args.plateau_lr_scheduler_metric,
"mode": args.plateau_lr_scheduler_mode,
"factor": args.plateau_lr_scheduler_factor,
"patience": args.plateau_lr_scheduler_patience,
"threshold": args.plateau_lr_scheduler_threshold,
"threshold_mode": args.plateau_lr_scheduler_threshold_mode,
"cooldown": args.plateau_lr_scheduler_cooldown,
"eps": args.plateau_lr_scheduler_eps,
},
"step_config": {
"step_size": args.step_lr_scheduler_step_size,
"gamma": args.step_lr_scheduler_gamma,
"last_epoch": args.step_lr_scheduler_last_epoch,
},
"multi_step_config": {
"milestones": args.multi_step_lr_scheduler_milestones,
"gamma": args.multi_step_lr_scheduler_gamma,
"last_epoch": args.multi_step_lr_scheduler_last_epoch,
},
"cyclic_config": {
"base_lr": args.cyclic_lr_scheduler_base_lr,
"max_lr": args.cyclic_lr_scheduler_max_lr,
"step_size_up": args.cyclic_lr_scheduler_step_size_up,
"step_size_down": args.cyclic_lr_scheduler_step_size_down,
"mode": args.cyclic_lr_scheduler_mode,
"gamma": args.cyclic_lr_scheduler_gamma,
"scale_fn": None,
"scale_mode": args.cyclic_lr_scheduler_scale_mode,
"cycle_momentum": args.cyclic_lr_scheduler_cycle_momentum,
"base_momentum": args.cyclic_lr_scheduler_base_momentum,
"max_momentum": args.cyclic_lr_scheduler_max_momentum,
"last_epoch": args.cyclic_lr_scheduler_last_epoch,
},
"one_cycle_config": {
"max_lr": args.one_cycle_lr_scheduler_max_lr,
"pct_start": args.one_cycle_lr_scheduler_pct_start,
"anneal_strategy": args.one_cycle_lr_scheduler_anneal_strategy,
"cycle_momentum": args.one_cycle_lr_scheduler_cycle_momentum,
"base_momentum": args.one_cycle_lr_scheduler_base_momentum,
"max_momentum": args.one_cycle_lr_scheduler_max_momentum,
"div_factor": args.one_cycle_lr_scheduler_div_factor,
"final_div_factor": args.one_cycle_lr_scheduler_final_div_factor,
"last_epoch": args.one_cycle_lr_scheduler_last_epoch,
},
"cosine_annealing_config": {
"last_epoch": args.cosine_annealing_lr_scheduler_last_epoch
},
},
"task_scheduler_config": {
"task_scheduler": args.task_scheduler,
"sequential_scheduler_config": {
"fillup": args.sequential_scheduler_fillup
},
"round_robin_scheduler_config": {
"fillup": args.round_robin_scheduler_fillup
},
"mixed_scheduler_config": {"fillup": args.mixed_scheduler_fillup},
},
},
"logging_config": {
"counter_unit": args.counter_unit,
"evaluation_freq": args.evaluation_freq,
"writer_config": {"writer": args.writer, "verbose": True},
"checkpointing": args.checkpointing,
"checkpointer_config": {
"checkpoint_path": args.checkpoint_path,
"checkpoint_freq": args.checkpoint_freq,
"checkpoint_metric": args.checkpoint_metric,
"checkpoint_task_metrics": args.checkpoint_task_metrics,
"checkpoint_runway": args.checkpoint_runway,
"clear_intermediate_checkpoints": args.clear_intermediate_checkpoints,
"clear_all_checkpoints": args.clear_all_checkpoints,
},
},
}
return config
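# Minimal usage sketch (not part of the original module); with no CLI arguments every
# default above is used, e.g. n_epochs stays at 1:
#   parser = parse_args()
#   args = parser.parse_args([])   # or parser.parse_args() to read sys.argv
#   config = parse_args_to_config(args)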
|
the-stack_0_8740 | #!/usr/bin/env python
import os
import scipy.io as sio
import glob
PYTHON_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = os.path.join(os.path.dirname(PYTHON_DIR), 'pmtkdataCopy')
def load_mat(matName):
"""look for the .mat file in pmtk3/pmtkdataCopy/
    currently only supports .mat files created by Matlab 5, 6, 7~7.2.
"""
try:
data = sio.loadmat(os.path.join(DATA_DIR, matName))
except NotImplementedError:
raise
except FileNotFoundError:
raise
return data
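# Usage sketch for load_mat (the file name and key below are placeholders, not files known
# to exist in pmtkdataCopy):
#   data = load_mat('someDataset.mat')
#   X = data['X']   # variables are keyed by their Matlab names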
def generate_rst():
"""generate chX.rst in current working directory"""
cwd = os.getcwd()
demo_dir = os.path.join(cwd, 'demos')
chapters = os.listdir(demo_dir)
for chapter in chapters:
if not os.path.isdir(os.path.join(demo_dir, chapter)):
continue
reg_py = os.path.join(demo_dir, chapter, '*.py')
scripts = glob.glob(reg_py)
rst_file = chapter + '.rst'
rst_file = os.path.join(demo_dir, chapter, rst_file)
with open(rst_file, 'w') as f:
f.write(chapter)
f.write('\n========================================\n')
for script in scripts:
script_name = os.path.basename(script)
f.write('\n' + script_name[:-3])
f.write('\n----------------------------------------\n')
reg_png = os.path.join(demo_dir,
chapter,
script_name[:-3] + '*.png')
for img in glob.glob(reg_png):
img_name = os.path.basename(img)
f.write(".. image:: " + img_name + "\n")
f.write(".. literalinclude:: " + script_name + "\n")
if __name__ == '__main__':
generate_rst()
print("Finished generate chX.rst!")
|
the-stack_0_8742 | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Astral Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
from test_framework.mininode import *
from test_framework.test_framework import AstralTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.script import CScript, OP_TRUE
# TestNode: A peer we use to send messages to astrald, and store responses.
class TestNode(NodeConnCB):
def __init__(self):
super().__init__()
self.last_sendcmpct = []
self.block_announced = False
# Store the hashes of blocks we've seen announced.
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
def on_sendcmpct(self, conn, message):
self.last_sendcmpct.append(message)
def on_cmpctblock(self, conn, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
def on_headers(self, conn, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
def on_inv(self, conn, message):
for x in self.last_message["inv"].inv:
if x.type == 2:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
# Requires caller to hold mininode_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
# received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
wait_until(received_hash, timeout=timeout, lock=mininode_lock)
def send_await_disconnect(self, message, timeout=30):
"""Sends a message to the node and wait for disconnect.
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
wait_until(lambda: not self.connected, timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(AstralTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
# Node0 = pre-segwit, node1 = segwit-aware
self.num_nodes = 2
self.extra_args = [["-vbparams=segwit:0:0"], ["-txindex"]]
self.utxos = []
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
# Create 10 more anyone-can-spend utxo's for testing.
def make_utxos(self):
# Doesn't matter which node we use, just use node0.
block = self.build_block_on_tip(self.nodes[0])
self.test_node.send_and_ping(msg_block(block))
assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
self.nodes[0].generate(100)
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.test_node.send_and_ping(msg_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
return
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert(peer.block_announced)
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version+1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version-1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = preferred_version-1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes astrald to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# astrald's choice of nonce.
def test_compactblock_construction(self, node, test_node, version, use_witness_address):
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
if use_witness_address:
# Want at least one segwit spend, so move all funds to
# a witness address.
address = node.addwitnessaddress(address)
value_to_send = node.getbalance()
node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
node.generate(1)
segwit_tx_generated = False
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert(segwit_tx_generated) # check that our test is not broken
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
test_node.wait_for_block_announcement(tip)
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node, node, version)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))  # pad to 64 hex chars so leading zeros are kept
for tx in block.vtx:
tx.calc_sha256()
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert("cmpctblock" in test_node.last_message)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert("cmpctblock" in test_node.last_message)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
# Make sure the prefilled_txn appears to have included the coinbase
assert(len(header_and_shortids.prefilled_txn) >= 1)
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
# Check that all prefilled_txn entries match what's in the block.
for entry in header_and_shortids.prefilled_txn:
entry.tx.calc_sha256()
# This checks the non-witness parts of the tx agree
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
# And this checks the witness
wtxid = entry.tx.calc_sha256(True)
if version == 2:
assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
else:
# Shouldn't have received a witness
assert(entry.tx.wit.is_null())
# Check that the cmpctblock message announced all the transactions.
assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
# And now check that all the shortids are as expected as well.
# Determine the siphash keys to use.
[k0, k1] = header_and_shortids.get_siphash_keys()
index = 0
while index < len(block.vtx):
if (len(header_and_shortids.prefilled_txn) > 0 and
header_and_shortids.prefilled_txn[0].index == index):
# Already checked prefilled transactions above
header_and_shortids.prefilled_txn.pop(0)
else:
tx_hash = block.vtx[index].sha256
if version == 2:
tx_hash = block.vtx[index].calc_sha256(True)
shortid = calculate_shortid(k0, k1, tx_hash)
assert_equal(shortid, header_and_shortids.shortids[0])
header_and_shortids.shortids.pop(0)
index += 1
# Test that astrald requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, node, test_node, version, segwit):
# Try announcing a block with an inv or header, expect a compactblock
# request
for announce in ["inv", "header"]:
block = self.build_block_on_tip(node, segwit=segwit)
with mininode_lock:
test_node.last_message.pop("getdata", None)
if announce == "inv":
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Send back a compactblock message that omits the coinbase
comp_block = HeaderAndShortIDs()
comp_block.header = CBlockHeader(block)
comp_block.nonce = 0
[k0, k1] = comp_block.get_siphash_keys()
coinbase_hash = block.vtx[0].sha256
if version == 2:
coinbase_hash = block.vtx[0].calc_sha256(True)
comp_block.shortids = [
calculate_shortid(k0, k1, coinbase_hash) ]
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
with mininode_lock:
assert("getblocktxn" in test_node.last_message)
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [0]) # should be a coinbase request
# Send the coinbase, and verify that the tip advances.
if version == 2:
msg = msg_witness_blocktxn()
else:
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = [block.vtx[0]]
test_node.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, node, test_node, version):
with_witness = (version==2)
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert("getblocktxn" in peer.last_message)
absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, expected_result)
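# Clarifying note (not in the original test): per BIP 152, getblocktxn indexes are
# differentially encoded on the wire; the to_absolute()/from_absolute() helpers used
# here convert between that encoding and the plain absolute positions asserted below.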
def test_tip_after_message(node, peer, msg, tip):
peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), tip)
# First try announcing compactblocks that won't reconstruct, and verify
# that we receive getblocktxn messages back.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
msg_bt = msg_blocktxn()
if with_witness:
msg_bt = msg_witness_blocktxn() # serialize with witnesses
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now try interspersing the prefilled transactions
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now try giving one transaction ahead of time.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
test_node.send_and_ping(msg_tx(block.vtx[1]))
assert(block.vtx[1].hash in node.getrawmempool())
# Prefill 4 out of the 6 transactions, and verify that only the one
# that was not in the mempool is requested.
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [5])
msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now provide all transactions to the node before the block is
# announced and verify reconstruction happens immediately.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
for tx in block.vtx[1:]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert(tx.hash in mempool)
# Clear out last request.
with mininode_lock:
test_node.last_message.pop("getblocktxn", None)
# Send compact block
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
with mininode_lock:
# Shouldn't have gotten a request for any transaction
assert("getblocktxn" not in test_node.last_message)
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, node, test_node, version):
if (len(self.utxos) == 0):
self.make_utxos()
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Relay the first 5 transactions from the block in advance
for tx in block.vtx[1:6]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:6]:
assert(tx.hash in mempool)
# Send compact block
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
with mininode_lock:
assert("getblocktxn" in test_node.last_message)
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
# Now give an incorrect response.
# Note that it's possible for astrald to be smart enough to know we're
# lying, since it could check to see if the shortid matches what we're
# sending, and eg disconnect us for misbehavior. If that behavior
# change were made, we could just modify this test by having a
# different peer provide the block further down, so that we're still
# verifying that the block isn't marked bad permanently. This is good
# enough for now.
msg = msg_blocktxn()
if version==2:
msg = msg_witness_blocktxn()
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
test_node.send_and_ping(msg)
# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# We should receive a getdata request
wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2|MSG_WITNESS_FLAG)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Deliver the block
if version==2:
test_node.send_and_ping(msg_witness_block(block))
else:
test_node.send_and_ping(msg_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, node, test_node, version):
# astrald will not send blocktxn responses for blocks whose height is
# more than 10 blocks deep.
MAX_GETBLOCKTXN_DEPTH = 10
chain_height = node.getblockcount()
current_height = chain_height
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
block_hash = node.getblockhash(current_height)
block = FromHex(CBlock(), node.getblock(block_hash, False))
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
test_node.send_message(msg)
wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)
[tx.calc_sha256() for tx in block.vtx]
with mininode_lock:
assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
tx.calc_sha256()
assert_equal(tx.sha256, block.vtx[index].sha256)
if version == 1:
# Witnesses should have been stripped
assert(tx.wit.is_null())
else:
# Check that the witness matches
assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
test_node.last_message.pop("blocktxn", None)
current_height -= 1
# Next request should send a full block response, as we're past the
# allowed depth for a blocktxn response.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, node, test_node):
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
node.generate(1)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
# Generate an old compactblock, and verify that it's not accepted.
cur_height = node.getblockcount()
hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
block = self.build_block_on_tip(node)
block.hashPrevBlock = hashPrevBlock
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
tips = node.getchaintips()
found = False
for x in tips:
if x["hash"] == block.hash:
assert_equal(x["status"], "headers-only")
found = True
break
assert(found)
# Requesting this block via getblocktxn should silently fail
# (to avoid fingerprinting attacks).
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
with mininode_lock:
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
assert "blocktxn" not in test_node.last_message
def activate_segwit(self, node):
node.generate(144*3)
assert_equal(get_bip9_status(node, "segwit")["status"], 'active')
def test_end_to_end_block_relay(self, node, listeners):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
[l.clear_block_announcement() for l in listeners]
# ToHex() won't serialize with witness, but this block has no witnesses
# anyway. TODO: repeat this test with witness tx's to a segwit node.
node.submitblock(ToHex(block))
for l in listeners:
wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
with mininode_lock:
for l in listeners:
assert "cmpctblock" in l.last_message
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
assert(len(self.utxos))
utxo = self.utxos[0]
block = self.build_block_with_transactions(node, utxo, 5)
del block.vtx[3]
block.hashMerkleRoot = block.calc_merkle_root()
if use_segwit:
# If we're testing with segwit, also drop the coinbase witness,
# but include the witness commitment.
add_witness_commitment(block)
block.vtx[0].wit.vtxinwit = []
block.solve()
# Now send the compact block with all transactions prefilled, and
# verify that we don't get disconnected.
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
assert int(node.getbestblockhash(), 16) != block.sha256  # compare values, not object identity
test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer, node, version):
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
msg = msg_sendcmpct()
msg.version = version
msg.announce = True
peer.send_and_ping(msg)
def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
assert(len(self.utxos))
def announce_cmpct_block(node, peer):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
cmpct_block = HeaderAndShortIDs()
cmpct_block.initialize_from_block(block)
msg = msg_cmpctblock(cmpct_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
return block, cmpct_block
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert(tx.hash in mempool)
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now test that delivering an invalid compact block won't break relay
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
cmpct_block.use_witness = True
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert(int(node.getbestblockhash(), 16) != block.sha256)
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = block.vtx[1:]
stalling_peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode()
self.segwit_node = TestNode()
self.old_node = TestNode() # version 1 peer <--> segwit node
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
self.old_node, services=NODE_NETWORK))
self.test_node.add_connection(connections[0])
self.segwit_node.add_connection(connections[1])
self.old_node.add_connection(connections[2])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
self.test_node.wait_for_verack()
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
self.log.info("Running tests, pre-segwit activation:")
self.log.info("Testing SENDCMPCT p2p message... ")
self.test_sendcmpct(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
sync_blocks(self.nodes)
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
sync_blocks(self.nodes)
self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
sync_blocks(self.nodes)
self.log.info("Testing compactblock requests... ")
self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
sync_blocks(self.nodes)
self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
sync_blocks(self.nodes)
self.log.info("Testing getblocktxn requests...")
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
self.log.info("Testing getblocktxn handler...")
self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
sync_blocks(self.nodes)
self.log.info("Testing compactblock requests/announcements not at chain tip...")
self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
sync_blocks(self.nodes)
self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
sync_blocks(self.nodes)
self.log.info("Testing handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
# End-to-end block relay tests
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
self.log.info("Testing reconstructing compact blocks from all peers...")
self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
sync_blocks(self.nodes)
# Advance to segwit activation
self.log.info("Advancing to segwit activation")
self.activate_segwit(self.nodes[1])
self.log.info("Running tests, post-segwit activation...")
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
sync_blocks(self.nodes)
self.log.info("Testing compactblock requests (unupgraded node)... ")
self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)
self.log.info("Testing getblocktxn requests (unupgraded node)...")
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
# Need to manually sync node0 and node1, because post-segwit activation,
# node1 will not download blocks from node0.
self.log.info("Syncing nodes...")
assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
self.log.info("Testing compactblock requests (segwit node)... ")
self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)
self.log.info("Testing getblocktxn requests (segwit node)...")
self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
# Test that if we submitblock to node1, we'll get a compact block
# announcement to all peers.
# (Post-segwit activation, blocks won't propagate from node0 to node1
# automatically, so don't bother testing a block announced to node0.)
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)
self.log.info("Testing invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
CompactBlocksTest().main()
|
the-stack_0_8747 | #this was initiated by atom(conan)
#partially modified by opkr
import os
import math
from cereal import car, log
from common.params import Params
from selfdrive.car.hyundai.spdcontroller import SpdController
import common.log as trace1
from selfdrive.controls.lib.events import Events
EventName = car.CarEvent.EventName
LaneChangeState = log.LateralPlan.LaneChangeState
class Spdctrl(SpdController):
def __init__(self, CP=None):
super().__init__( CP )
self.cv_Raio = 0.4
self.cv_Dist = -5
self.steer_mode = ""
self.cruise_gap = 0.0
self.cut_in = False
self.map_enable = False
self.map_spdlimit_offset = 0
self.target_speed = 0
self.target_speed_camera = 0
self.target_speed_map = 0.0
self.target_speed_map_counter = 0
self.target_speed_map_counter1 = 0
self.target_speed_map_counter2 = 0
self.hesitant_status = False
self.hesitant_timer = 0
self.map_decel_only = False
self.map_spdlimit_offset = int(Params().get("OpkrSpeedLimitOffset", encoding="utf8"))
self.map_enabled = False
self.second = 0
def update_lead(self, sm, CS, dRel, yRel, vRel, CC):
self.map_decel_only = CS.out.cruiseState.modeSel == 4
plan = sm['longitudinalPlan']
dRele = plan.dRel1 #EON Lead
yRele = plan.yRel1 #EON Lead
vRele = plan.vRel1 * 3.6 + 0.5 #EON Lead
dRelef = plan.dRel2 #EON Lead
yRelef = plan.yRel2 #EON Lead
vRelef = plan.vRel2 * 3.6 + 0.5 #EON Lead
lead2_status = plan.status2
self.second += 1
if self.second > 100:
self.map_enabled = Params().get_bool("OpkrMapEnable")
self.second = 0
if self.map_enabled:
self.target_speed_camera = plan.targetSpeedCamera + round(plan.targetSpeedCamera*0.01*self.map_spdlimit_offset)
else:
self.target_speed_camera = CS.out.safetySign + round(CS.out.safetySign*0.01*self.map_spdlimit_offset)
if self.target_speed_camera <= 29:
self.map_enable = False
self.target_speed = 0
elif self.target_speed_camera > 29 and (plan.onSpeedControl if self.map_enabled else CS.on_speed_control):
self.target_speed = self.target_speed_camera
self.map_enable = True
else:
self.target_speed = 0
self.map_enable = False
lead_set_speed = int(round(self.cruise_set_speed_kph))
lead_wait_cmd = 300
dRel = 150
vRel = 0
dRel2 = 140
vRel2 = 0
#dRel, yRel, vRel = self.get_lead( sm, CS )
if 1 < dRele < 149:
dRel = int(dRele) # use dRele (EON lead distance)
vRel = int(vRele)
elif 1 < CS.lead_distance < 149:
dRel = int(CS.lead_distance) # use CS.lead_distance (radar lead distance)
vRel = int(CS.lead_objspd)
else:
dRel = 150
vRel = 0
if 1 < dRelef < 140:
dRel2 = int(dRelef)
vRel2 = int(vRelef) # for cut-in detection??
dst_lead_distance = int(CS.clu_Vanz*self.cv_Raio) # target following distance
dst_lead_distance2 = int(CS.clu_Vanz*0.4) # target following distance
if dst_lead_distance > 100:
dst_lead_distance = 100
#elif dst_lead_distance < 15:
#dst_lead_distance = 15
if 1 < dRel < 149: # if the gap to the lead car is under 150 m, i.e. a lead car is detected,
self.time_no_lean = 0
d_delta = dRel - dst_lead_distance # d_delta = lead gap (EON value) - target following distance
lead_objspd = vRel # relative speed of the lead vehicle.
else:
d_delta = 0
lead_objspd = 0
if 1 < dRel2 < 140:
d_delta2 = dRel2 - dst_lead_distance2
else:
d_delta2 = 0
if CS.driverAcc_time and not self.map_decel_only: # when the driver presses the accelerator, sync the cruise set speed to current speed + 1
if int(CS.VSetDis) < int(round(CS.clu_Vanz)) + 1:
lead_set_speed = int(round(CS.clu_Vanz)) + 1
self.seq_step_debug = "운전자가속"
lead_wait_cmd = 10
elif int(round(self.target_speed)) < int(CS.VSetDis) and self.map_enable and ((int(round(self.target_speed)) < int(round(self.cruise_set_speed_kph))) and self.target_speed != 0):
Events().add(EventName.camSpeedDown)
self.seq_step_debug = "맵기반감속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
elif CC.res_speed != 0 and CC.res_speed < int(CS.VSetDis):
self.seq_step_debug = "RES속도조정"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
# distance-keeping conditions
elif d_delta < 0 or d_delta2 < 0 and not self.map_decel_only: # we are closer than the target following distance (current speed * 0.4)
if (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and dRele - dRelef > 3 and lead2_status:
self.seq_step_debug = "끼어들기감지"
#lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 15, -5)
self.cut_in = True
elif lead_objspd < 0 and self.cut_in == True and (int(CS.clu_Vanz)-6) <= int(CS.VSetDis) and dRele < int(CS.clu_Vanz)*0.25 and int(CS.clu_Vanz) > 80:
self.seq_step_debug = "거리확보3"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
elif lead_objspd < 0 and self.cut_in == True and (int(CS.clu_Vanz)-4) <= int(CS.VSetDis) and dRele < int(CS.clu_Vanz)*0.3 and int(CS.clu_Vanz) > 50:
self.seq_step_debug = "거리확보2"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
elif lead_objspd < 0 and self.cut_in == True and (int(CS.clu_Vanz)-2) <= int(CS.VSetDis) and dRele < int(CS.clu_Vanz)*0.35 and int(CS.clu_Vanz) > 20:
self.seq_step_debug = "거리확보1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
elif lead_objspd <= 0 and self.cut_in == True and (int(CS.clu_Vanz)-3) <= int(CS.VSetDis):
self.seq_step_debug = "끼어들기감속중"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 10, -1)
elif lead_objspd < -30 or (dRel < 60 and CS.clu_Vanz > 60 and lead_objspd < -5) and (int(CS.clu_Vanz)-5) <= int(CS.VSetDis): # when the cut-in vehicle brakes hard
self.seq_step_debug = "기준내,-5"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 15, -5)
self.cut_in = False
elif lead_objspd < -20 or (dRel < 80 and CS.clu_Vanz > 80 and lead_objspd < -5) and (int(CS.clu_Vanz)-4) <= int(CS.VSetDis): # when the cut-in vehicle brakes hard
self.seq_step_debug = "기준내,-4"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 15, -4)
self.cut_in = False
elif lead_objspd < -10 and (int(CS.clu_Vanz)-3) <= int(CS.VSetDis):
self.seq_step_debug = "기준내,-3"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 15, -3)
self.cut_in = False
elif lead_objspd < 0 and (int(CS.clu_Vanz)-1) <= int(CS.VSetDis):
self.seq_step_debug = "기준내,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 35, -1)
self.cut_in = False
elif lead_objspd > 3 and int(CS.clu_Vanz) <= int(CS.VSetDis):
self.seq_step_debug = "기준내,앞차가속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 60, 1)
self.cut_in = False
elif lead_objspd >= 0 and int(CS.clu_Vanz) <= int(CS.VSetDis):
self.seq_step_debug = "기준내>=0,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 290, -1)
self.cut_in = False
else:
self.seq_step_debug = "거리유지"
self.cut_in = False
# deceleration conditions while the lead vehicle is still far away
elif 20 <= dRel < 149 and lead_objspd < -20 and not self.map_decel_only: # when a stopped or hard-braking vehicle is detected
self.cut_in = False
if dRel >= 50:
self.seq_step_debug = "정차차량 감속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -10)
elif dRel >= 30:
self.seq_step_debug = "정차차량 감속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 20, -10)
elif self.cruise_set_speed_kph > int(round((CS.clu_Vanz))) and not self.map_decel_only: # when the EON set speed is higher than the vehicle speed
self.cut_in = False
if 10 > dRel > 3 and lead_objspd <= 0 and 1 < int(CS.clu_Vanz) <= 7 and CS.VSetDis < 45 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "출발속도조정"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 7, 5)
elif 20 > dRel > 3 and lead_objspd > 5 and CS.clu_Vanz <= 25 and CS.VSetDis < 55 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "SS>VS,출발"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 110, 1)
#elif lead_objspd > 9 and CS.clu_Vanz > 20 and CS.VSetDis < 45: # raise the set speed a lot when the lead accelerates hard right after launch
# self.seq_step_debug = "SS>VS,초가"
# lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 10, 5)
#elif lead_objspd > 8 and CS.clu_Vanz > 45 and CS.VSetDis < 60: # raise the set speed a lot when the lead accelerates hard at mid speed
# self.seq_step_debug = "SS>VS,중가"
# lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 15, 5)
#elif lead_objspd > 7 and CS.clu_Vanz > 65 and CS.VSetDis < 80:
# self.seq_step_debug = "SS>VS,종가"
# lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 15, 5)
elif lead_objspd > 0 and int(CS.clu_Vanz//lead_objspd) >= int(CS.VSetDis//lead_objspd) and int(CS.clu_Vanz*0.4) < dRel < 149 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "SS>VS,++1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 15, 1)
elif lead_objspd > 0 and int(CS.clu_Vanz)+lead_objspd >= int(CS.VSetDis) and int(CS.clu_Vanz*0.4) < dRel < 149 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0) and not self.hesitant_status:
self.seq_step_debug = "SS>VS,+1"
if int(CS.VSetDis) > int(CS.clu_Vanz)+14:
self.hesitant_status = True
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 15, 1)
elif CS.clu_Vanz > 80 and lead_objspd < -1 and (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and int(CS.clu_Vanz) >= dRel*1.7 and 1 < dRel < 149: # deceleration outside the keep-distance range: while the lead slows and the gap narrows below about half the current speed, step the set speed down gradually with the relative speed
self.seq_step_debug = "SS>VS,v>80,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, max(15, 50+(lead_objspd*2)), -1)
elif CS.clu_Vanz > 60 and lead_objspd < -1 and (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and int(CS.clu_Vanz) >= dRel*1.9 and 1 < dRel < 149: # deceleration outside the keep-distance range: while the lead slows and the gap narrows below about half the current speed, step the set speed down gradually with the relative speed
self.seq_step_debug = "SS>VS,v>60,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, max(15, 50+(lead_objspd*2)), -1)
elif CS.clu_Vanz > 40 and lead_objspd < -1 and (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and int(CS.clu_Vanz) >= dRel*2.2 and 1 < dRel < 149: # deceleration outside the keep-distance range: while the lead slows and the gap narrows below about half the current speed, step the set speed down gradually with the relative speed
self.seq_step_debug = "SS>VS,v>40,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, max(15, 50+(lead_objspd*2)), -1)
elif 60 > CS.clu_Vanz > 30 and lead_objspd < -1 and (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and int(CS.clu_Vanz) >= dRel*0.85 and 1 < dRel < 149:
self.seq_step_debug = "SS>VS,60>v>30,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, max(15, 150-(abs(lead_objspd**3))), -1)
elif 7 < int(CS.clu_Vanz) < 30 and lead_objspd < 0 and CS.VSetDis > 30:
self.seq_step_debug = "SS>VS,30이하"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 10, -5)
elif lead_objspd <= 0 and int(CS.clu_Vanz)+5 <= int(CS.VSetDis) and int(CS.clu_Vanz) > 40 and 1 < dRel < 149: # when matching the lead's speed, hold the cruise set speed at current speed + 5
self.seq_step_debug = "SS>VS,vRel<=0"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 25, -1)
elif d_delta == 0 and lead_objspd == 0 and int(CS.clu_Vanz//10) >= int(CS.VSetDis//10) and dRel > 149 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "선행차없음"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 15, 5)
elif d_delta == 0 and lead_objspd == 0 and self.cruise_set_speed_kph > int(CS.VSetDis) and int(CS.clu_Vanz//10) >= int(CS.VSetDis//10) and dRel > 149 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "점진가속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 30, 1)
elif lead_objspd == 0 and int(CS.clu_Vanz) == 0 and dRel <= 6:
self.seq_step_debug = "출발대기"
else:
self.seq_step_debug = "SS>VS,거리유지"
if self.hesitant_status and self.hesitant_timer > 150:
self.hesitant_status = False
self.hesitant_timer = 0
elif self.hesitant_status:
self.hesitant_timer += 1
elif lead_objspd >= 0 and CS.clu_Vanz >= int(CS.VSetDis) and int(CS.clu_Vanz * 0.5) < dRel < 149 and not self.map_decel_only:
self.cut_in = False
self.seq_step_debug = "속도유지"
elif lead_objspd < 0 and int(CS.clu_Vanz * 0.5) >= dRel > 1 and not self.map_decel_only:
self.cut_in = False
self.seq_step_debug = "일반감속,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 50, -1)
elif self.map_decel_only and self.cruise_set_speed_kph > int(round(CS.VSetDis)) and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "속도원복"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 10, 1)
else:
self.cut_in = False
self.seq_step_debug = "속도유지"
return lead_wait_cmd, lead_set_speed
def update_curv(self, CS, sm, curve_speed):
wait_time_cmd = 0
set_speed = self.cruise_set_speed_kph
# 2. Curve deceleration.
#if self.cruise_set_speed_kph >= 100:
if CS.out.cruiseState.modeSel == 1 and sm['lateralPlan'].laneChangeState == LaneChangeState.off and not (CS.out.leftBlinker or CS.out.rightBlinker)and not self.map_decel_only:
if curve_speed < 40 and CS.clu_Vanz > 40 and CS.lead_distance >= 15:
set_speed = min(45, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.25))
self.seq_step_debug = "커브감속-5"
wait_time_cmd = 10
elif curve_speed < 60 and CS.clu_Vanz > 40 and CS.lead_distance >= 15:
set_speed = min(55, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.2))
self.seq_step_debug = "커브감속-4"
wait_time_cmd = 20
elif curve_speed < 70 and CS.clu_Vanz > 40 and CS.lead_distance >= 15:
set_speed = min(65, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.15))
self.seq_step_debug = "커브감속-3"
wait_time_cmd = 30
elif curve_speed < 80 and CS.clu_Vanz > 40 and CS.lead_distance >= 15:
set_speed = min(75, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.1))
self.seq_step_debug = "커브감속-2"
wait_time_cmd = 40
elif curve_speed < 90 and CS.clu_Vanz > 40 and CS.lead_distance >= 15:
set_speed = min(85, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.05))
self.seq_step_debug = "커브감속-1"
wait_time_cmd = 50
return wait_time_cmd, set_speed
def update_log(self, CS, set_speed, target_set_speed, long_wait_cmd ):
if CS.out.cruiseState.modeSel == 0:
self.steer_mode = "오파모드"
elif CS.out.cruiseState.modeSel == 1:
self.steer_mode = "차간+커브"
elif CS.out.cruiseState.modeSel == 2:
self.steer_mode = "차간ONLY"
elif CS.out.cruiseState.modeSel == 3:
self.steer_mode = "편도1차선"
elif CS.out.cruiseState.modeSel == 4:
self.steer_mode = "맵감속ONLY"
if self.cruise_gap != CS.cruiseGapSet:
self.cruise_gap = CS.cruiseGapSet
str3 = 'MODE={:s} VL={:03.0f}/{:03.0f} TM={:03.0f}/{:03.0f} TS={:03.0f}'.format( self.steer_mode, set_speed, CS.VSetDis, long_wait_cmd, self.long_curv_timer, int(round(self.target_speed)) )
str4 = ' RD=D:{:03.0f}/V:{:03.0f} CG={:1.0f} DG={:s}'.format( CS.lead_distance, CS.lead_objspd, self.cruise_gap, self.seq_step_debug )
str5 = str3 + str4
trace1.printf2( str5 )
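# Illustrative sketch (not part of the original file): the target following distance used
# in update_lead() above is roughly the current speed in km/h scaled by cv_Raio (0.4) and
# capped at 100 m. The helper below only restates that arithmetic for clarity.
def _target_following_distance_example(speed_kph, ratio=0.4, cap=100):
    return min(cap, int(speed_kph * ratio))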
|
the-stack_0_8748 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
monkycoind should be started with the command line arguments:
monkycoind -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if (sys.version_info.major, sys.version_info.minor) < (3, 5):
print("This example only works with Python 3.5 and greater")
sys.exit(1)
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = asyncio.get_event_loop()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
async def handle(self):
msg = await self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
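# Illustrative sketch (not part of the original example): the module docstring mentions
# wrapping the body of `handle` in `while True` instead of re-scheduling a future.
# This hypothetical subclass shows that variant; it is never instantiated below.
class ZMQHandlerLoop(ZMQHandler):
    async def handle(self):
        while True:
            msg = await self.zmqSubSocket.recv_multipart()
            topic, body = msg[0], msg[1]
            # Same topics as in ZMQHandler; print only a short payload prefix here.
            print(topic.decode(), binascii.hexlify(body)[:64])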
daemon = ZMQHandler()
daemon.start()
|
the-stack_0_8749 | import os
from tqdm import tqdm
import requests
from bs4 import BeautifulSoup
# get list of download urls from the database
data_url = "https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/"
r = requests.get(data_url)
soup = BeautifulSoup(r.text, features="html.parser")
urls = [link.get('href') for link in soup.findAll('a')]
# filter url by making sure urls are not directories or query links
urls = [url for url in urls if "/" not in url.split(".")[-1] and "?" not in url]
for i in urls:
print(i)
if not os.path.isdir("data"):
os.mkdir("data")
for url in tqdm(urls):
os.system(f"cd data && wget {data_url}{url} > /dev/null 2>&1")
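# Alternative sketch (an assumption, not part of the original script): download with the
# already-imported requests library instead of shelling out to wget; unused by default.
def download_with_requests(url_list, dest="data"):
    for name in tqdm(url_list):
        with requests.get(f"{data_url}{name}", stream=True) as resp:
            resp.raise_for_status()
            with open(os.path.join(dest, name), "wb") as fh:
                for chunk in resp.iter_content(chunk_size=1 << 16):
                    fh.write(chunk)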
os.system("gunzip data/*.gz")
|
the-stack_0_8750 | # -*- coding: utf-8 -*-
"""
Not so simple tkinter based gui around the pdf2xlsx.do_it function.
"""
from tkinter import Tk, ttk, filedialog, messagebox, StringVar, Toplevel, END
import os
import shutil
from .managment import do_it, do_it2
from .config import config
__version__ = '0.2.0'
class ConfOption:
"""
This widget is used to place the configuration options to the ConfigWindow. It contains
a label to show what is the configuration and an entry with StringVar to provide override
possibility. The value of the config :class:`JsonDict` is converted to a string for the entry.
If the value of a configuration is a list, it is converted to a comma separated string.
:param Frame root: Tk parent frame
:param str key: Key to the "config" :class:`JsonDict`
:param int row: Parameter for grid window manager
"""
def __init__(self, root, key, row):
self.key = key
dict_value = config[key]
ttk.Label(root, text=dict_value['text']).grid(row=row, column=0, sticky='w')
self.sv = StringVar()
if isinstance(dict_value['value'], list):
self.sv.set(", ".join(map(str, dict_value['value'])))
else:
self.sv.set(str(dict_value['value']))
self.entry = ttk.Entry(root, textvariable=self.sv, width=54)
self.entry.grid(row=row, column=1, sticky='e')
if dict_value['conf_method'] == 'filedialog':
ttk.Button(root, text='Sel',
command=self.browse_callback,
width=4).grid(row=row, column=2, sticky='w')
def update_config(self):
"""
Write the current entry value to the configuration. The original type of the
config value is checked, and the string is converted to this value (int, list of
int, list of string...)
"""
if isinstance(config[self.key]['value'], list):
if isinstance(config[self.key]['value'][0], int):
config[self.key]['value'] = list(map(int, self.sv.get().split(', ')))
else:
config[self.key]['value'] = self.sv.get().split(', ')
elif isinstance(config[self.key]['value'], int):
config[self.key]['value'] = int(self.sv.get())
else:
config[self.key]['value'] = self.sv.get()
def browse_callback(self):
"""
Asks for a file path via a file dialog (no type filter is applied here).
The entry content is replaced with the selected path.
"""
path = filedialog.askopenfilename(initialdir='.\\',
title="Choose file...",)
self.entry.delete(0, END)
self.entry.insert(0, path)
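# Illustrative sketch (not part of the original module) of the list <-> string round-trip
# that ConfOption relies on; the values here are made up for the example.
def _int_list_roundtrip_example(values=(1, 2, 3)):
    entry_text = ", ".join(map(str, values))          # what the Entry displays
    stored = list(map(int, entry_text.split(', ')))   # what update_config writes back
    return entry_text, stored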
class ConfigWindow:
"""
Sub window for settings. The window is hidden by default and shown when the user clicks
the Settings button. It contains the configuration options.
There are two buttons, Save (which hides the window) and Accept; both update
the configuration file. The configuration items are stored in a list.
:param master: Tk parent class
"""
def __init__(self, master):
self.master = master
self.window = Toplevel(self.master)
self.window.resizable(False, False)
self.window.withdraw()
self.window.protocol("WM_DELETE_WINDOW", self._on_closing)
self.window.title('Settings...')
self.conf_list = []
self.main_frame = ttk.Frame(self.window)
self.main_frame.pack(padx=5, pady=5)
self.main_frame.grid_columnconfigure(1, minsize=20, weight=1)
ttk.Label(self.main_frame, text='Configuration:').grid(row=0, column=0,
columnspan=2, sticky='w')
row = 1
for conf_element in config:
if config[conf_element]['Display'] is True:
self.conf_list.append(
ConfOption(root=self.main_frame, key=conf_element, row=row))
row += 1
ttk.Button(self.main_frame, text='Save',
command=self.save_callback).grid(row=row, column=0, sticky='e')
ttk.Button(self.main_frame, text='Accept',
command=self.accept_callback).grid(row=row, column=1, sticky='w')
def save_callback(self):
"""
Hides the ConfigWindow and updates and stores the configuration
"""
self.window.withdraw()
self.accept_callback()
def accept_callback(self):
"""
Goes through every configuration item and updates them one by one, then stores the
updated configuration.
"""
for conf in self.conf_list:
conf.update_config()
config.store()
def _on_closing(self):
self.window.withdraw()
class PdfXlsxGui:
"""
Simple GUI which lets the user select the source zip file and the destination directory
for the xlsx file. Contains a file dialog for selecting the zip file to work with.
There is a button to start the conversion, and also a Settings button to open the
settings window
:param master: Tk parent class
"""
def __init__(self, master):
self.master = master
self.master.title('Convert Zip -> Pdf -> Xlsx')
self.master.resizable(False, False)
self.main_frame = ttk.Frame(self.master)
self.main_frame.pack(padx=5, pady=5)
self.option_list = ["zip pdf to xlsx", "order details"]
self.selected_task = StringVar(self.main_frame)
self.selected_task.set(self.option_list[0]) # default value
self.box = ttk.Combobox(self.main_frame, textvariable=self.selected_task, values=self.option_list)
self.box.bind("<<ComboboxSelected>>", self.update_task)
self.box.grid(row=0, column=0, columnspan=2)
self.task_do = self.process_pdf
ttk.Label(self.main_frame, text='Source File:').grid(row=1, column=0, sticky='w')
self.src_entry = ttk.Entry(self.main_frame, width=54)
self.src_entry.grid(row=1, column=0, sticky='e')
self.src_entry.insert(0, '.\\src.zip')
ttk.Button(self.main_frame, text='Browse...',
command=self.browse_src_callback).grid(row=1, column=1, sticky='w')
ttk.Button(self.main_frame, text='Start conversion',
command=self.execute_task).grid(row=5, column=0, sticky='w')
ttk.Button(self.main_frame, text='Settings',
command=self.config_callback).grid(row=5, column=1, columnspan=1, sticky='e')
self.config_window = ConfigWindow(self.master)
self.filetypes = (("zip files", "*.zip"), ("all files", "*.*"))
def update_task(self, event):
print(event.widget.get())
if event.widget.get() == self.option_list[0]:
self.task_do = self.process_pdf
self.filetypes = (("zip files", "*.zip"), ("all files", "*.*"))
elif event.widget.get() == self.option_list[1]:
self.task_do = self.convert_xlsx
self.filetypes = (("xlsx files", "*.xlsx"), ("all files", "*.*"))
else:
self.task_do = self.unknown_task
def config_callback(self):
"""
Bring the configuration window up
"""
self.config_window.window.state('normal')
self.config_window.window.lift(self.master)
def browse_src_callback(self):
"""
Asks for the source file; the opened dialog filters for zip files by default.
The src_entry attribute is updated based on the selection.
"""
path = filedialog.askopenfilename(initialdir=config['last_path']['value'],
title="Choose the Zip file...",
filetypes=self.filetypes)
config['last_path']['value'] = os.path.dirname(path)
config.store()
self.src_entry.delete(0, END)
self.src_entry.insert(0, path)
def execute_task(self):
self.task_do()
def process_pdf(self):
"""
Facade for the do_it function. Only the source file and destination directory are
updated; the other parameters are left at their defaults.
"""
try:
logger = do_it(src_name=self.src_entry.get(),
dst_dir=config['tmp_dir']['value'],
xlsx_name=config['xlsx_name']['value'],
tmp_dir=config['tmp_dir']['value'],
file_extension=config['file_extension']['value'])
# tmp_str = '{1} Invoices were found with the following number of Entries:\n{0!s}'
# messagebox.showinfo(title='Conversion Completed',
# message=tmp_str.format(logger, len(logger.invo_list)))
except PermissionError as exc:
messagebox.showerror('Exception', exc)
def convert_xlsx(self):
print("Convert those xlsx: {}".format(self.box.get()))
try:
logger = do_it2(src_name=self.src_entry.get(),
dst_dir=config['tmp_dir']['value'],
xlsx_name=config['xlsx_name']['value'],
tmp_dir=config['tmp_dir']['value'])
# tmp_str = '{1} Invoices were found with the following number of Entries:\n{0!s}'
# messagebox.showinfo(title='Conversion Completed',
# message=tmp_str.format(logger, len(logger.invo_list)))
except PermissionError as exc:
messagebox.showerror('Exception', exc)
def unknown_task(self):
print("Unknown task selected: {}".format(self.box.get()))
def main():
root = Tk()
def _post_clean_up():
try:
shutil.rmtree(config['tmp_dir']['value'])
except FileNotFoundError:
print("You did nothing, you dummy, why did you start me up???")
finally:
root.destroy()
root.protocol("WM_DELETE_WINDOW", _post_clean_up)
gui = PdfXlsxGui(root)
root.mainloop()
if __name__ == '__main__':
main()
|
the-stack_0_8752 | """Home Assistant control object."""
import asyncio
from ipaddress import IPv4Address
import logging
from pathlib import Path
import shutil
import tarfile
from tempfile import TemporaryDirectory
from typing import Optional
from uuid import UUID
from awesomeversion import AwesomeVersion, AwesomeVersionException
from securetar import atomic_contents_add, secure_path
import voluptuous as vol
from voluptuous.humanize import humanize_error
from ..const import (
ATTR_ACCESS_TOKEN,
ATTR_AUDIO_INPUT,
ATTR_AUDIO_OUTPUT,
ATTR_BOOT,
ATTR_IMAGE,
ATTR_PORT,
ATTR_REFRESH_TOKEN,
ATTR_SSL,
ATTR_TYPE,
ATTR_UUID,
ATTR_VERSION,
ATTR_WAIT_BOOT,
ATTR_WATCHDOG,
FILE_HASSIO_HOMEASSISTANT,
BusEvent,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
ConfigurationFileError,
HomeAssistantError,
HomeAssistantWSError,
)
from ..hardware.const import PolicyGroup
from ..hardware.data import Device
from ..jobs.decorator import Job
from ..utils import remove_folder
from ..utils.common import FileConfiguration
from ..utils.json import read_json_file, write_json_file
from .api import HomeAssistantAPI
from .const import WSType
from .core import HomeAssistantCore
from .secrets import HomeAssistantSecrets
from .validate import SCHEMA_HASS_CONFIG
from .websocket import HomeAssistantWebSocket
_LOGGER: logging.Logger = logging.getLogger(__name__)
HOMEASSISTANT_BACKUP_EXCLUDE = [
"*.db-shm",
"*.corrupt.*",
"__pycache__/*",
"*.log",
"*.log.*",
"OZW_Log.txt",
]
class HomeAssistant(FileConfiguration, CoreSysAttributes):
"""Home Assistant core object for handling it."""
def __init__(self, coresys: CoreSys):
"""Initialize Home Assistant object."""
super().__init__(FILE_HASSIO_HOMEASSISTANT, SCHEMA_HASS_CONFIG)
self.coresys: CoreSys = coresys
self._api: HomeAssistantAPI = HomeAssistantAPI(coresys)
self._websocket: HomeAssistantWebSocket = HomeAssistantWebSocket(coresys)
self._core: HomeAssistantCore = HomeAssistantCore(coresys)
self._secrets: HomeAssistantSecrets = HomeAssistantSecrets(coresys)
@property
def api(self) -> HomeAssistantAPI:
"""Return API handler for core."""
return self._api
@property
def websocket(self) -> HomeAssistantWebSocket:
"""Return Websocket handler for core."""
return self._websocket
@property
def core(self) -> HomeAssistantCore:
"""Return Core handler for docker."""
return self._core
@property
def secrets(self) -> HomeAssistantSecrets:
"""Return Secrets Manager for core."""
return self._secrets
@property
def machine(self) -> str:
"""Return the system machine."""
return self.core.instance.machine
@property
def arch(self) -> str:
"""Return arch of running Home Assistant."""
return self.core.instance.arch
@property
def error_state(self) -> bool:
"""Return True if system is in error."""
return self.core.error_state
@property
def ip_address(self) -> IPv4Address:
"""Return IP of Home Assistant instance."""
return self.core.instance.ip_address
@property
def api_port(self) -> int:
"""Return network port to Home Assistant instance."""
return self._data[ATTR_PORT]
@api_port.setter
def api_port(self, value: int) -> None:
"""Set network port for Home Assistant instance."""
self._data[ATTR_PORT] = value
@property
def api_ssl(self) -> bool:
"""Return if we need ssl to Home Assistant instance."""
return self._data[ATTR_SSL]
@api_ssl.setter
def api_ssl(self, value: bool):
"""Set SSL for Home Assistant instance."""
self._data[ATTR_SSL] = value
@property
def api_url(self) -> str:
"""Return API url to Home Assistant."""
return (
f"{'https' if self.api_ssl else 'http'}://{self.ip_address}:{self.api_port}"
)
@property
def ws_url(self) -> str:
"""Return API url to Home Assistant."""
return f"{'wss' if self.api_ssl else 'ws'}://{self.ip_address}:{self.api_port}/api/websocket"
@property
def watchdog(self) -> bool:
"""Return True if the watchdog should protect Home Assistant."""
return self._data[ATTR_WATCHDOG]
@watchdog.setter
def watchdog(self, value: bool):
"""Return True if the watchdog should protect Home Assistant."""
self._data[ATTR_WATCHDOG] = value
@property
def wait_boot(self) -> int:
"""Return time to wait for Home Assistant startup."""
return self._data[ATTR_WAIT_BOOT]
@wait_boot.setter
def wait_boot(self, value: int):
"""Set time to wait for Home Assistant startup."""
self._data[ATTR_WAIT_BOOT] = value
@property
def latest_version(self) -> Optional[AwesomeVersion]:
"""Return last available version of Home Assistant."""
return self.sys_updater.version_homeassistant
@property
def image(self) -> str:
"""Return image name of the Home Assistant container."""
if self._data.get(ATTR_IMAGE):
return self._data[ATTR_IMAGE]
return f"ghcr.io/home-assistant/{self.sys_machine}-homeassistant"
@image.setter
def image(self, value: Optional[str]) -> None:
"""Set image name of Home Assistant container."""
self._data[ATTR_IMAGE] = value
@property
def version(self) -> Optional[AwesomeVersion]:
"""Return version of local version."""
return self._data.get(ATTR_VERSION)
@version.setter
def version(self, value: AwesomeVersion) -> None:
"""Set installed version."""
self._data[ATTR_VERSION] = value
@property
def boot(self) -> bool:
"""Return True if Home Assistant boot is enabled."""
return self._data[ATTR_BOOT]
@boot.setter
def boot(self, value: bool):
"""Set Home Assistant boot options."""
self._data[ATTR_BOOT] = value
@property
def uuid(self) -> UUID:
"""Return a UUID of this Home Assistant instance."""
return self._data[ATTR_UUID]
@property
def supervisor_token(self) -> Optional[str]:
"""Return an access token for the Supervisor API."""
return self._data.get(ATTR_ACCESS_TOKEN)
@supervisor_token.setter
def supervisor_token(self, value: str) -> None:
"""Set the access token for the Supervisor API."""
self._data[ATTR_ACCESS_TOKEN] = value
@property
def refresh_token(self) -> Optional[str]:
"""Return the refresh token to authenticate with Home Assistant."""
return self._data.get(ATTR_REFRESH_TOKEN)
@refresh_token.setter
def refresh_token(self, value: Optional[str]):
"""Set Home Assistant refresh_token."""
self._data[ATTR_REFRESH_TOKEN] = value
@property
def path_pulse(self):
"""Return path to asound config."""
return Path(self.sys_config.path_tmp, "homeassistant_pulse")
@property
def path_extern_pulse(self):
"""Return path to asound config for Docker."""
return Path(self.sys_config.path_extern_tmp, "homeassistant_pulse")
@property
def audio_output(self) -> Optional[str]:
"""Return a pulse profile for output or None."""
return self._data[ATTR_AUDIO_OUTPUT]
@audio_output.setter
def audio_output(self, value: Optional[str]):
"""Set audio output profile settings."""
self._data[ATTR_AUDIO_OUTPUT] = value
@property
def audio_input(self) -> Optional[str]:
"""Return pulse profile for input or None."""
return self._data[ATTR_AUDIO_INPUT]
@audio_input.setter
def audio_input(self, value: Optional[str]):
"""Set audio input settings."""
self._data[ATTR_AUDIO_INPUT] = value
@property
def need_update(self) -> bool:
"""Return true if a Home Assistant update is available."""
try:
return self.version < self.latest_version
except (AwesomeVersionException, TypeError):
return False
async def load(self) -> None:
"""Prepare Home Assistant object."""
await asyncio.wait([self.secrets.load(), self.core.load()])
# Register for events
self.sys_bus.register_event(BusEvent.HARDWARE_NEW_DEVICE, self._hardware_events)
def write_pulse(self):
"""Write asound config to file and return True on success."""
pulse_config = self.sys_plugins.audio.pulse_client(
input_profile=self.audio_input, output_profile=self.audio_output
)
# Cleanup wrong maps
if self.path_pulse.is_dir():
shutil.rmtree(self.path_pulse, ignore_errors=True)
# Write pulse config
try:
self.path_pulse.write_text(pulse_config, encoding="utf-8")
except OSError as err:
_LOGGER.error("Home Assistant can't write pulse/client.config: %s", err)
else:
_LOGGER.info("Update pulse/client.config: %s", self.path_pulse)
async def _hardware_events(self, device: Device) -> None:
"""Process hardware requests."""
if (
not self.sys_hardware.policy.is_match_cgroup(PolicyGroup.UART, device)
or not self.version
or self.version < "2021.9.0"
):
return
configuration = await self.sys_homeassistant.websocket.async_send_command(
{ATTR_TYPE: "get_config"}
)
if not configuration or "usb" not in configuration.get("components", []):
return
self.sys_homeassistant.websocket.send_message({ATTR_TYPE: "usb/scan"})
@Job()
async def backup(self, tar_file: tarfile.TarFile) -> None:
"""Backup Home Assistant Core config/ directory."""
# Let Home Assistant Core know we are about to backup
try:
await self.websocket.async_send_command({ATTR_TYPE: WSType.BACKUP_START})
except HomeAssistantWSError:
_LOGGER.warning(
"Preparing backup of Home Assistant Core failed. Check HA Core logs."
)
with TemporaryDirectory(dir=self.sys_config.path_tmp) as temp:
temp_path = Path(temp)
# Store local configs/state
try:
write_json_file(temp_path.joinpath("homeassistant.json"), self._data)
except ConfigurationFileError as err:
raise HomeAssistantError(
f"Can't save meta for Home Assistant Core: {err!s}", _LOGGER.error
) from err
# Backup data config folder
def _write_tarfile():
with tar_file as backup:
# Backup metadata
backup.add(temp, arcname=".")
# Backup data
atomic_contents_add(
backup,
self.sys_config.path_homeassistant,
excludes=HOMEASSISTANT_BACKUP_EXCLUDE,
arcname="data",
)
try:
_LOGGER.info("Backing up Home Assistant Core config folder")
await self.sys_run_in_executor(_write_tarfile)
_LOGGER.info("Backup Home Assistant Core config folder done")
finally:
try:
await self.sys_homeassistant.websocket.async_send_command(
{ATTR_TYPE: WSType.BACKUP_END}
)
except HomeAssistantWSError:
_LOGGER.warning(
"Error during Home Assistant Core backup. Check HA Core logs."
)
async def restore(self, tar_file: tarfile.TarFile) -> None:
"""Restore Home Assistant Core config/ directory."""
with TemporaryDirectory(dir=self.sys_config.path_tmp) as temp:
temp_path = Path(temp)
temp_data = temp_path.joinpath("data")
temp_meta = temp_path.joinpath("homeassistant.json")
# extract backup
def _extract_tarfile():
"""Extract tar backup."""
with tar_file as backup:
backup.extractall(path=temp_path, members=secure_path(backup))
try:
await self.sys_run_in_executor(_extract_tarfile)
except tarfile.TarError as err:
raise HomeAssistantError(
f"Can't read tarfile {tar_file}: {err}", _LOGGER.error
) from err
# Check old backup format v1
if not temp_data.exists():
temp_data = temp_path
# Restore data
def _restore_data():
"""Restore data."""
shutil.copytree(
temp_data, self.sys_config.path_homeassistant, symlinks=True
)
_LOGGER.info("Restore Home Assistant Core config folder")
await remove_folder(self.sys_config.path_homeassistant)
try:
await self.sys_run_in_executor(_restore_data)
except shutil.Error as err:
raise HomeAssistantError(
f"Can't restore origin data: {err}", _LOGGER.error
) from err
_LOGGER.info("Restore Home Assistant Core config folder done")
if not temp_meta.exists():
return
_LOGGER.info("Restore Home Assistant Core metadata")
# Read backup data
try:
data = read_json_file(temp_meta)
except ConfigurationFileError as err:
raise HomeAssistantError() from err
# Validate
try:
data = SCHEMA_HASS_CONFIG(data)
except vol.Invalid as err:
raise HomeAssistantError(
f"Can't validate backup data: {humanize_error(data, err)}",
                _LOGGER.error,
) from err
# Restore metadata
for attr in (
ATTR_AUDIO_INPUT,
ATTR_AUDIO_OUTPUT,
ATTR_PORT,
ATTR_SSL,
ATTR_REFRESH_TOKEN,
ATTR_WATCHDOG,
ATTR_WAIT_BOOT,
):
self._data[attr] = data[attr]
|
the-stack_0_8753 | # Ana 1.
class bankAccount():
def __init__(self, ownerName, balance):
self.ownerName = ownerName
self.balance = balance
def bankAccountDetails(self):
print("Account Holder :", self.ownerName)
print("Available Balance :", self.balance)
def deposit(self):
depositMoney = int(input("Enter amount to be deposited : "))
self.balance += depositMoney
print("Available Net Balance :", self.balance)
def withdraw(self):
withdrawMoney = int(input("Enter amount to be Withdrawn : "))
if self.balance >= withdrawMoney:
self.balance -= withdrawMoney
print("Withdrawn Money :", withdrawMoney)
print("Avalable Balance :", self.balance)
print("Transaction Successful !!!")
else :
print("Insufficient Balance")
    def bankingServices(self):
        cashDeposit = "n"
        cashWithdraw = "n"
        transaction = input("Start the transaction [Y/N] - ")
        while transaction.lower() == "y":
            while cashDeposit.lower() != "y":
                self.deposit()
                cashDeposit = input("End the transaction [Y/N] - ")
            while cashWithdraw.lower() != "y":
                self.withdraw()
                cashWithdraw = input("End the transaction [Y/N] - ")
            # reset the flags and ask again, otherwise this loop never ends
            cashDeposit = "n"
            cashWithdraw = "n"
            transaction = input("Start another transaction [Y/N] - ")
        print("Thank you for using our banking services")
details = bankAccount("Abhi",5000)
details.bankAccountDetails()
details.deposit()
details.withdraw()
details.bankingServices()
# Ans 2.
import math
# The original exercise inherits from a `parameters` base class that is not
# defined in this snippet; a minimal stub is assumed here so the code runs.
class parameters:
    def __init__(self, shape_name):
        self.shape_name = shape_name
class cone(parameters):
def __init__(self, radius, height):
parameters.__init__(self, "Cone")
self.radius = radius
self.height = height
def volume(self):
print("Volume of cone :", math.pi * (self.radius * self.radius) * self.height // 3)
def surfaceArea(self):
print("Surface Area of Cone :", math.pi * self.radius * math.sqrt(self.radius * self.radius + self.height * self.height))
abc = cone(5,10)
abc.volume()
abc.surfaceArea() |
the-stack_0_8754 | import unittest
import nideconv
import numpy as np
from scipy import signal
def double_gamma_with_d(x, a1=6, a2=12, b1=0.9, b2=0.9, c=0.35, d1=5.4, d2=10.8):
return (x/(d1))**a1 * np.exp(-(x-d1)/b1) - c*(x/(d2))**a2 * np.exp(-(x-d2)/b2)
class ResponseFytterTest(unittest.TestCase):
"""Tests for ResponseFytter"""
def create_signals(self,
signal_sample_frequency=4,
event_1_gain=1,
event_2_gain=1,
event_1_sd=0,
event_2_sd=0,
noise_gain=1.5,
deconv_sample_frequency=4,
deconvolution_interval=[-5, 25]):
"""creates signals to be used for the deconvolution of
2 specific impulse response shapes, with covariates.
It's supposed to create a signal that's long enough to
result in testable outcomes even with moderate
amounts of noise.
"""
self.signal_sample_frequency = signal_sample_frequency
# deconvolution parameters
self.deconvolution_interval = deconvolution_interval
# create some exponentially distributed random ISI events (Dale, 1999)
# of which we will create and deconvolve responses.
period_durs = np.random.gamma(4.0, 8, size=1000)
events = period_durs.cumsum()
self.events_1, self.events_2 = events[0::2], events[1::2]
self.durations_1, self.durations_2 = np.ones(self.events_1.shape[0])/signal_sample_frequency, \
np.ones(self.events_2.shape[0])/signal_sample_frequency
#self.durations_1 -= 1e-5
#self.durations_2 -= 1e-5
#self.durations_1, self.durations_2 = None, None
# these events are scaled with their own underlying covariate.
# for instance, you could have a model-based variable that scales the signal on a per-trial basis.
self.events_gains_1 = event_1_gain * np.ones(len(self.events_1)) + \
np.random.randn(len(self.events_1)) * event_1_sd
self.events_gains_2 = event_2_gain * np.ones(len(self.events_2)) + \
np.random.randn(len(self.events_2)) * event_2_sd
times = np.arange(0, events.max()+45.0, 1.0 /
self.signal_sample_frequency)
event_1_in_times = np.array([((times > te) * (times < te+d)) * eg
for te, d, eg in zip(self.events_1, self.durations_1, self.events_gains_1)]).sum(axis=0)
event_2_in_times = np.array([((times > te) * (times < te+d)) * eg
for te, d, eg in zip(self.events_2, self.durations_2, self.events_gains_2)]).sum(axis=0)
# create hrfs
time_points_hrf = np.arange(0, 20, 1.0/self.signal_sample_frequency)
self.hrf_1 = double_gamma_with_d(
time_points_hrf, a1=4.5, a2=10, d1=7.0, d2=10.0)
self.hrf_2 = double_gamma_with_d(
time_points_hrf, a1=1.5, a2=10, d1=5.0, d2=10.0)
self.hrf_1 /= self.hrf_1.max()
self.hrf_2 /= self.hrf_2.max()
signal_1 = signal.convolve(event_1_in_times, self.hrf_1, 'full')[
:times.shape[0]]
signal_2 = signal.convolve(event_2_in_times, self.hrf_2, 'full')[
:times.shape[0]]
# combine the two signals with one another, z-score and add noise
self.input_data = signal_1 + signal_2
# input_data = (input_data - np.mean(input_data)) / input_data.std()
self.input_data += np.random.randn(
self.input_data.shape[0]) * noise_gain
def test_vanilla_deconvolve(self,
event_1_gain=1,
event_2_gain=1,
noise_gain=1.5,
signal_sample_frequency=4,
**kwargs):
"""The simplest of possible tests, two impulse response functions
with different shapes, both with gain = 1
"""
self.create_signals(signal_sample_frequency=signal_sample_frequency,
event_1_gain=event_1_gain,
event_2_gain=event_2_gain,
event_1_sd=0,
event_2_sd=0,
noise_gain=noise_gain)
self.rfy = nideconv.ResponseFitter(
input_signal=self.input_data,
sample_rate=self.signal_sample_frequency)
# first event type, no covariate
self.rfy.add_event(
event_name='1',
onset_times=self.events_1,
durations=self.durations_1,
# durations=None,
interval=self.deconvolution_interval,
**kwargs
)
# second
self.rfy.add_event(
event_name='2',
onset_times=self.events_2,
durations=self.durations_2,
# durations=None,
interval=self.deconvolution_interval,
**kwargs
)
self.rfy.regress()
#self.assertAlmostEqual(rfy.event_types['1'].timecourses['int'], event_1_gain)
#self.assertAlmostEqual(rfy.event_types['2'].timecourses['int'], event_2_gain)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_8760 | import os
from transformers import BertTokenizer
from utils import get_rank, mkdir, synchronize
class CustomBertTokenizer(BertTokenizer):
def __init__(self, *args, **kwargs):
super(CustomBertTokenizer, self).__init__(*args, **kwargs)
def decode(self, token_ids, skip_special_tokens=True,
clean_up_tokenization_spaces=True, end_flags=[]):
filtered_tokens = self.convert_ids_to_tokens(
token_ids,
skip_special_tokens=skip_special_tokens,
end_flags=end_flags)
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_tokens:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(" " + token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
text = ''.join(sub_texts)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def convert_ids_to_tokens(self, ids, skip_special_tokens=False, end_flags=[]):
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
if skip_special_tokens and index in self.all_special_ids:
continue
if index in end_flags:
tokens.append('.')
break
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def get_tokenizer(config):
if get_rank() != 0:
synchronize()
pretrained_cache_dir = '.cache_uncased/'
bert_base_path = 'bert-base-uncased' # 30522
if not os.path.exists(pretrained_cache_dir):
mkdir(pretrained_cache_dir)
tokenizer = CustomBertTokenizer.from_pretrained(bert_base_path)
tokenizer.save_pretrained(save_directory=pretrained_cache_dir)
else:
tokenizer = CustomBertTokenizer.from_pretrained(pretrained_cache_dir)
if get_rank() == 0:
synchronize()
SEP = tokenizer.sep_token_id
PAD = tokenizer.pad_token_id
MASK = tokenizer.mask_token_id
EOS = tokenizer.convert_tokens_to_ids('.')
num_tokens = tokenizer.vocab_size
return tokenizer, SEP, EOS, MASK, PAD, num_tokens
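# Minimal usage sketch (an assumption for illustration, not part of this
# module; it needs network access the first time to download the vocab):
#
#   tokenizer, SEP, EOS, MASK, PAD, num_tokens = get_tokenizer(config=None)
#   ids = tokenizer.encode("a dog runs in the park")
#   # end_flags=[EOS] makes decode() emit '.' and stop at the first EOS id
#   text = tokenizer.decode(ids, end_flags=[EOS])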
|
the-stack_0_8762 | #!/usr/bin/python3
import sys
from collections import OrderedDict
from eth_typing import Hash32
from eth_utils import big_endian_to_int
import rlp
from Crypto.Hash import keccak
from rlp.sedes import BigEndianInt, big_endian_int, Binary, binary
from rlp import encode
from eth_utils import to_bytes, to_hex
from web3 import IPCProvider, Web3
_BYTES = 4 # bytes in word
DATASET_BYTES_INIT = 2**30 # bytes in dataset at genesis
DATASET_BYTES_GROWTH = 2**23 # dataset growth per epoch
CACHE_BYTES_INIT = 2**24 # bytes in cache at genesis
CACHE_BYTES_GROWTH = 2**17 # cache growth per epoch
CACHE_MULTIPLIER=1024 # Size of the DAG relative to the cache
EPOCH_LENGTH = 30000 # blocks per epoch
MIX_BYTES = 128 # width of mix
HASH_BYTES = 64 # hash length in bytes
DATASET_PARENTS = 256 # number of parents of each dataset element
CACHE_ROUNDS = 3 # number of rounds in cache production
ACCESSES = 64 # number of accesses in hashimoto loop
address = Binary.fixed_length(20, allow_empty=True)
hash32 = Binary.fixed_length(32)
uint256 = BigEndianInt(256)
trie_root = Binary.fixed_length(32, allow_empty=True)
class MiningBlockHeader(rlp.Serializable):
fields = [
('parent_hash', hash32),
('uncles_hash', hash32),
('coinbase', address),
('state_root', trie_root),
('transaction_root', trie_root),
('receipt_root', trie_root),
('bloom', uint256),
('difficulty', big_endian_int),
('block_number', big_endian_int),
('gas_limit', big_endian_int),
('gas_used', big_endian_int),
('timestamp', big_endian_int),
('extra_data', binary),
#('mix_hash', binary), we have removed these 2 fields because we want a mining block header only
#('nonce', Binary(8, allow_empty=True)
]
provider = Web3.IPCProvider('/home/chronic/TMP_Stuff/geth.ipc')
w3 = Web3(provider)
print(w3.isConnected())
blockNumber = int(sys.argv[1], 10)
myHeader = MiningBlockHeader(
parent_hash = to_bytes(int(w3.eth.getBlock(blockNumber).parentHash.hex(), 16)),
uncles_hash = to_bytes(int(w3.eth.getBlock(blockNumber).sha3Uncles.hex(), 16)),
coinbase = to_bytes(int(w3.eth.getBlock(blockNumber).miner, 16)),
state_root = to_bytes(int(w3.eth.getBlock(blockNumber).stateRoot.hex(), 16)),
transaction_root = to_bytes(int(w3.eth.getBlock(blockNumber).transactionsRoot.hex(), 16)),
receipt_root = to_bytes(int(w3.eth.getBlock(blockNumber).receiptsRoot.hex(), 16)),
bloom = int(w3.eth.getBlock(blockNumber).logsBloom.hex(), 16),
difficulty = w3.eth.getBlock(blockNumber).difficulty,
block_number = w3.eth.getBlock(blockNumber).number,
gas_limit = w3.eth.getBlock(blockNumber).gasLimit,
gas_used = w3.eth.getBlock(blockNumber).gasUsed,
timestamp = w3.eth.getBlock(blockNumber).timestamp,
extra_data = to_bytes(int(w3.eth.getBlock(blockNumber).extraData.hex(), 16)),
#mix_hash = to_bytes(int(w3.eth.getBlock(blockNumber).mixHash.hex(), 16)),
#nonce = to_bytes(int(w3.eth.getBlock(blockNumber).nonce.hex(), 16)),
)
from pyethash import hashimoto_light, mkcache_bytes
# Type annotation here is to ensure we don't accidentally use strings instead of bytes.
cache_by_epoch: 'OrderedDict[int, bytearray]' = OrderedDict() #here we cache by epoch order
CACHE_MAX_ITEMS = 10 #and limit the items to 10
def get_cache(block_number: int) -> bytes:
    epoch_index = block_number // EPOCH_LENGTH  # index of the epoch this block belongs to
# Get the cache if already generated, marking it as recently used
if epoch_index in cache_by_epoch:
c = cache_by_epoch.pop(epoch_index) # pop and append at end
cache_by_epoch[epoch_index] = c
return c
# Generate the cache if it was not already in memory
# Simulate requesting mkcache by block number: multiply index by epoch length
    c = mkcache_bytes(epoch_index * EPOCH_LENGTH)
    cache_by_epoch[epoch_index] = c  # store the cache bytes generated
    # Limit memory usage for the cache: evict the least recently used epoch
    if len(cache_by_epoch) > CACHE_MAX_ITEMS:
        cache_by_epoch.popitem(last=False)  # remove least recently accessed
    return c
# Now we write the check-proof-of-work function: it verifies that the block
# header data satisfies the Ethash requirements.
def check_pow(block_number: int,
              mining_hash: Hash32,
              mix_hash: Hash32,
              nonce: bytes,
              difficulty: int) -> bool:
    cache = get_cache(block_number)  # get the cache for this block's epoch
    # hashimoto_light takes the block number, the cache, the header hash and
    # the nonce (converted from big-endian bytes to an integer)
    mining_output = hashimoto_light(block_number,
                                    cache,
                                    mining_hash,
                                    big_endian_to_int(nonce))
    print("MIX Digest: ", mining_output[b'mix digest'])
    print("MIX HASH: ", w3.eth.getBlock(block_number).mixHash.hex())
    print("RESULT: ", mining_output[b'result'])
    print("CONDITION: ", (2**256) // difficulty)
    if mining_output[b'mix digest'] != mix_hash:
        # the recomputed mix digest must match the header's mixHash
        return False
    elif big_endian_to_int(mining_output[b'result']) > (2**256 // difficulty):
        # the hash result must be at most 2^256 divided by the difficulty
        return False
    else:
        return True  # all good! We could do more checks, but this is enough for now. For additional checks see https://github.com/ethereum/py-evm/blob/d553bd405bbf41a1da0c227a614baba7b43e9449/eth/consensus/pow.py
# The next section makes sure the data is formatted correctly so that we can
# compute the proper header hash for verification.
block_number = blockNumber
myHash = "0x" + keccak.new(data=rlp.encode(myHeader), digest_bits=256).hexdigest()
mining_hash = to_bytes(int(myHash, 16))
mix_hash = to_bytes(int(w3.eth.getBlock(block_number).mixHash.hex(), 16))
nonce = to_bytes(int(w3.eth.getBlock(block_number).nonce.hex(), 16))
difficulty = myHeader.difficulty
check_pow(block_number, mining_hash, mix_hash, nonce, difficulty)
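# Rough intuition (illustrative only): with difficulty D the target is
# 2**256 // D, so roughly one in D random nonces hashes below it; a valid
# mainnet header should therefore make check_pow() return True here.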
|
the-stack_0_8764 | """
Define application-wide configuration.
"""
import os
import pytz
basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
SQLALCHEMY_TRACK_MODIFICATIONS = False
# DATABASE TIMEZONE. All datetimes are converted to this before being entered in the database
TIMEZONE = pytz.timezone("UTC")
|
the-stack_0_8767 | """
Tests shared by MaskedArray subclasses.
"""
import numpy as np
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension.base import BaseOpsUtil
class ComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
ser = pd.Series(data)
result = op(ser, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
# subclass will override to parametrize 'other'
def test_scalar(self, other, all_compare_operators, dtype):
op = self.get_op_from_name(all_compare_operators)
left = pd.array([1, 0, None], dtype=dtype)
result = op(left, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(left._data, other)
expected = pd.arrays.BooleanArray(values, left._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(left, pd.array([1, 0, None], dtype=dtype))
class NumericOps:
# Shared by IntegerArray and FloatingArray, not BooleanArray
def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
def test_array(self, all_compare_operators, dtype):
op = self.get_op_from_name(all_compare_operators)
left = pd.array([0, 1, 2, None, None, None], dtype=dtype)
right = pd.array([0, 1, None, 0, 1, None], dtype=dtype)
result = op(left, right)
values = op(left._data, right._data)
mask = left._mask | right._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
left, pd.array([0, 1, 2, None, None, None], dtype=dtype)
)
tm.assert_extension_array_equal(
right, pd.array([0, 1, None, 0, 1, None], dtype=dtype)
)
def test_compare_with_booleanarray(self, all_compare_operators, dtype):
op = self.get_op_from_name(all_compare_operators)
left = pd.array([True, False, None] * 3, dtype="boolean")
right = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype=dtype)
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(left, other)
result = op(left, right)
tm.assert_extension_array_equal(result, expected)
# reversed op
expected = op(other, left)
result = op(right, left)
tm.assert_extension_array_equal(result, expected)
def test_compare_to_string(self, dtype):
# GH#28930
ser = pd.Series([1, None], dtype=dtype)
result = ser == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
self.assert_series_equal(result, expected)
|
the-stack_0_8769 |
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import rmm
from cuml.test.utils import array_equal, unit_param, quality_param, \
stress_param
from cuml.neighbors import NearestNeighbors as cuKNN
from sklearn.neighbors import NearestNeighbors as skKNN
from sklearn.datasets.samples_generator import make_blobs
import cudf
import pandas as pd
import numpy as np
import sklearn
import cuml
from cuml.common import has_scipy
def predict(neigh_ind, _y, n_neighbors):
import scipy.stats as stats
neigh_ind = neigh_ind.astype(np.int32)
ypred, count = stats.mode(_y[neigh_ind], axis=1)
return ypred.ravel(), count.ravel() * 1.0 / n_neighbors
def valid_metrics():
cuml_metrics = cuml.neighbors.VALID_METRICS["brute"]
sklearn_metrics = sklearn.neighbors.VALID_METRICS["brute"]
return [value for value in cuml_metrics if value in sklearn_metrics]
@pytest.mark.parametrize("datatype", ["dataframe", "numpy"])
@pytest.mark.parametrize("nrows", [500, 1000, 10000])
@pytest.mark.parametrize("ncols", [100, 1000])
@pytest.mark.parametrize("n_neighbors", [10, 50])
@pytest.mark.parametrize("n_clusters", [2, 10])
def test_neighborhood_predictions(nrows, ncols, n_neighbors, n_clusters,
datatype):
if not has_scipy():
pytest.skip('Skipping test_neighborhood_predictions because ' +
'Scipy is missing')
X, y = make_blobs(n_samples=nrows, centers=n_clusters,
n_features=ncols, random_state=0)
X = X.astype(np.float32)
if datatype == "dataframe":
X = cudf.DataFrame.from_gpu_matrix(rmm.to_device(X))
knn_cu = cuKNN()
knn_cu.fit(X)
neigh_ind = knn_cu.kneighbors(X, n_neighbors=n_neighbors,
return_distance=False)
if datatype == "dataframe":
assert isinstance(neigh_ind, cudf.DataFrame)
neigh_ind = neigh_ind.as_gpu_matrix().copy_to_host()
else:
assert isinstance(neigh_ind, np.ndarray)
labels, probs = predict(neigh_ind, y, n_neighbors)
assert array_equal(labels, y)
def test_return_dists():
n_samples = 50
n_feats = 50
k = 5
X, y = make_blobs(n_samples=n_samples,
n_features=n_feats, random_state=0)
knn_cu = cuKNN()
knn_cu.fit(X)
ret = knn_cu.kneighbors(X, k, return_distance=False)
assert not isinstance(ret, tuple)
assert ret.shape == (n_samples, k)
ret = knn_cu.kneighbors(X, k, return_distance=True)
assert isinstance(ret, tuple)
assert len(ret) == 2
@pytest.mark.parametrize('input_type', ['dataframe', 'ndarray'])
@pytest.mark.parametrize('nrows', [unit_param(500), quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('n_feats', [unit_param(3), quality_param(100),
stress_param(1000)])
@pytest.mark.parametrize('k', [unit_param(3), quality_param(30),
stress_param(50)])
@pytest.mark.parametrize("metric", valid_metrics())
def test_cuml_against_sklearn(input_type, nrows, n_feats, k, metric):
X, _ = make_blobs(n_samples=nrows,
n_features=n_feats, random_state=0)
p = 5 # Testing 5-norm of the minkowski metric only
knn_sk = skKNN(metric=metric, p=p) # Testing
knn_sk.fit(X)
D_sk, I_sk = knn_sk.kneighbors(X, k)
X_orig = X
if input_type == "dataframe":
X = cudf.DataFrame.from_gpu_matrix(rmm.to_device(X))
knn_cu = cuKNN(metric=metric, p=p)
knn_cu.fit(X)
D_cuml, I_cuml = knn_cu.kneighbors(X, k)
if input_type == "dataframe":
assert isinstance(D_cuml, cudf.DataFrame)
assert isinstance(I_cuml, cudf.DataFrame)
D_cuml_arr = D_cuml.as_gpu_matrix().copy_to_host()
I_cuml_arr = I_cuml.as_gpu_matrix().copy_to_host()
else:
assert isinstance(D_cuml, np.ndarray)
assert isinstance(I_cuml, np.ndarray)
D_cuml_arr = D_cuml
I_cuml_arr = I_cuml
# Assert the cuml model was properly reverted
np.testing.assert_allclose(knn_cu.X_m.to_output("numpy"), X_orig,
atol=1e-5, rtol=1e-4)
# Allow a max relative diff of 10% and absolute diff of 1%
np.testing.assert_allclose(D_cuml_arr, D_sk, atol=1e-2,
rtol=1e-1)
assert I_cuml_arr.all() == I_sk.all()
def test_knn_fit_twice():
"""
Test that fitting a model twice does not fail.
This is necessary since the NearestNeighbors class
needs to free Cython allocated heap memory when
fit() is called more than once.
"""
n_samples = 1000
n_feats = 50
k = 5
X, y = make_blobs(n_samples=n_samples,
n_features=n_feats, random_state=0)
knn_cu = cuKNN()
knn_cu.fit(X)
knn_cu.fit(X)
knn_cu.kneighbors(X, k)
del knn_cu
@pytest.mark.parametrize('input_type', ['ndarray'])
@pytest.mark.parametrize('nrows', [unit_param(500), quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('n_feats', [unit_param(20), quality_param(100),
stress_param(1000)])
def test_nn_downcast_fails(input_type, nrows, n_feats):
X, y = make_blobs(n_samples=nrows,
n_features=n_feats, random_state=0)
knn_cu = cuKNN()
if input_type == 'dataframe':
X_pd = pd.DataFrame({'fea%d' % i: X[0:, i] for i in range(X.shape[1])})
X_cudf = cudf.DataFrame.from_pandas(X_pd)
knn_cu.fit(X_cudf, convert_dtype=True)
with pytest.raises(Exception):
knn_cu.fit(X, convert_dtype=False)
# Test fit() fails when downcast corrupted data
X = np.array([[np.finfo(np.float32).max]], dtype=np.float64)
knn_cu = cuKNN()
with pytest.raises(Exception):
knn_cu.fit(X, convert_dtype=False)
|
the-stack_0_8770 | """Test converting quaternions to and from rotation matrices"""
from __future__ import division, print_function, absolute_import
import unittest
import numpy as np
import os
import rowan
zero = np.array([0, 0, 0, 0])
one = np.array([1, 0, 0, 0])
half = np.array([0.5, 0.5, 0.5, 0.5])
# Load test files
TESTDATA_FILENAME = os.path.join(
os.path.dirname(__file__),
'files/test_arrays.npz')
with np.load(TESTDATA_FILENAME) as data:
input1 = data['input1']
vector_inputs = data['vector_inputs']
class TestMatrix(unittest.TestCase):
"""Test rotation matrix conversions"""
def test_from_matrix(self):
self.assertTrue(np.all(
rowan.from_matrix(np.eye(3)) == one
))
with self.assertRaises(ValueError):
            rowan.from_matrix(2 * np.eye(3))
mat = np.array([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
self.assertTrue(
np.logical_or(
np.allclose(rowan.from_matrix(mat), half),
np.allclose(rowan.from_matrix(mat), -half)
)
)
mat = np.array([[0, 1, 0],
[0, 0, -1],
[-1, 0, 0]])
v = np.copy(half)
v[3] *= -1
self.assertTrue(np.allclose(
rowan.from_matrix(mat), v
))
def test_to_matrix(self):
v = np.copy(zero)
with self.assertRaises(ZeroDivisionError):
rowan.to_matrix(v)
v = 2*np.ones(4)
with self.assertRaises(ValueError):
rowan.to_matrix(v)
v = np.copy(one)
self.assertTrue(np.all(
rowan.to_matrix(v) == np.eye(3)
))
v = np.copy(half)
self.assertTrue(np.allclose(
rowan.to_matrix(v),
np.array([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
))
v[3] *= -1
self.assertTrue(np.allclose(
rowan.to_matrix(v),
np.array([[0, 1, 0],
[0, 0, -1],
[-1, 0, 0]])
))
def test_to_from_matrix(self):
# The equality is only guaranteed up to a sign
converted = rowan.from_matrix(
rowan.to_matrix(
input1))
self.assertTrue(
np.all(
np.logical_or(
np.isclose(input1 - converted, 0),
np.isclose(input1 + converted, 0),
)
)
)
def test_rotation(self):
quat_rotated = rowan.rotate(
input1,
vector_inputs)
matrices = rowan.to_matrix(
input1)
matrix_rotated = np.einsum(
'ijk,ki->ij',
matrices,
vector_inputs.T
)
self.assertTrue(np.allclose(matrix_rotated, quat_rotated))
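        # Both code paths implement the same rotation: for a unit quaternion q,
        # rowan.rotate computes v' = q v q*, while rowan.to_matrix builds the
        # equivalent 3x3 matrix R(q) applied through the einsum above, so the
        # two results should agree up to floating point error.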
|
the-stack_0_8772 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.ads.google_ads.v2.proto.services import click_view_service_pb2_grpc
class ClickViewServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.ads.googleads.v2.services ClickViewService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
)
def __init__(self, channel=None, credentials=None,
address='googleads.googleapis.com:443'):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments are mutually '
'exclusive.',
)
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
'click_view_service_stub': click_view_service_pb2_grpc.ClickViewServiceStub(channel),
}
@classmethod
def create_channel(
cls,
address='googleads.googleapis.com:443',
credentials=None,
**kwargs):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (dict): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address,
credentials=credentials,
scopes=cls._OAUTH_SCOPES,
**kwargs
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def get_click_view(self):
"""Return the gRPC stub for :meth:`ClickViewServiceClient.get_click_view`.
Returns the requested click view in full detail.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['click_view_service_stub'].GetClickView |
the-stack_0_8775 | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from veriloggen import *
def mkTest():
m = Module('test')
clk = m.Reg('CLK')
rst = m.Reg('RST')
count = m.Reg('count', width=32)
m.Initial(
Systask('dumpfile', 'uut.vcd'),
Systask('dumpvars', 0, clk, rst, count),
)
m.Initial(
clk(0),
Forever(clk(Not(clk), ldelay=5)) # forever #5 CLK = ~CLK;
)
m.Initial(
rst(0),
Delay(100),
rst(1),
Delay(100),
rst(0),
Delay(1000),
count(0),
While(count < 1024)(
count( count + 1 ),
Event(Posedge(clk))
),
Systask('finish'),
)
return m
if __name__ == '__main__':
test = mkTest()
verilog = test.to_verilog('')
print(verilog)
|
the-stack_0_8776 | class Solution:
def numRollsToTarget(self, d: int, f: int, target: int) -> int:
m = 10 ** 9 + 7
dp = [[0] * (target + 1) for _ in range(d + 1)]
dp[0][0] = 1
for i in range(1, d + 1):
for j in range(1, f + 1):
for k in range(j, target + 1):
dp[i][k] = (dp[i][k] + dp[i - 1][k - j]) % m
return dp[d][target]
d = 1
f = 6
target = 3
res = Solution().numRollsToTarget(d, f, target)
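# Worked example of the recurrence dp[i][k] = sum_{j=1..f} dp[i-1][k-j]
# (ways for i dice with faces 1..f to sum to k): with d=2, f=6 the target 7
# is reachable in 6 ways (1+6, 2+5, 3+4, 4+3, 5+2, 6+1). The assert below is
# an added sanity check, not part of the original snippet.
assert Solution().numRollsToTarget(2, 6, 7) == 6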
print(res) |
the-stack_0_8778 | from migen import *
from migen.genlib.cdc import MultiReg
from misoc.interconnect.csr import *
from migen.fhdl.decorators import ClockDomainsRenamer
class SDTriggerOutputDriver(Module, AutoCSR):
def __init__(self, trig_out, latch_in, posedge_in):
posedge_prev = Signal()
self.sync += [
posedge_prev.eq(posedge_in),
If(posedge_in & ~posedge_prev,
trig_out.eq(latch_in)
).Else(
trig_out.eq(0)
)
]
class SDTrigger(Module, AutoCSR):
"""Add-on core for generating trigger signals timed in sync with
the SDEmulator's data output completion.
"""
def __init__(self, sd_linklayer, pins):
self._latch = CSRStorage(len(pins))
self.clock_domains.cd_sd = ClockDomain(reset_less=True)
self.comb += self.cd_sd.clk.eq(sd_linklayer.cd_sd.clk)
sdcd_latch = Signal(len(pins))
self.specials += MultiReg(self._latch.storage, sdcd_latch, odomain="sd", n=3)
# Output circuit itself is entirely in SD clock domain
self.submodules.drv = ClockDomainsRenamer("sd")(
SDTriggerOutputDriver(pins, sdcd_latch, sd_linklayer.data_out_done))
|
the-stack_0_8779 | import urllib
import urllib2
url="http://licensing.research.ncsu.edu/technologies"
values1={"limit":200,"offset":0}
values2={"limit":200,"offset":200}
data1=urllib.urlencode(values1)
data2=urllib.urlencode(values2)
theurl1=url+"?"+data1
theurl2=url+"?"+data2
r1=urllib2.urlopen(theurl1)
r2=urllib2.urlopen(theurl2)
f1=open("1.html","w")
f1.write(r1.read())
f1.close()
f2=open("2.html","w")
f2.write(r2.read())
f2.close()
|
the-stack_0_8782 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Maths Keras layers
~~~~~~~~~~~~~~~~~~
"""
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ipu.ops import math_ops as ipu_math_ops
class SerialDense(Layer):
"""Densely-connected NN layer where the dot operation is serialized to reduce
the size of this operation.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Given the `input` tensor with shape `[..., m, k]` and `kernel` tensor with
shape `[k, n]`, the matrix multiplication can be serialized as follows:
* Along the `m` dimension of `input`, by setting `serialization_dimension` to
`input_columns`.
* Along the `k` dimension of `input` and `kernel` by setting
`serialization_dimension` to `input_rows_kernel_columns`.
  * Along the `n` dimension of `kernel`, by setting `serialization_dimension` to
`kernel_rows`.
Example:
.. code-block:: python
# as first layer in a sequential model:
model = Sequential()
model.add(SerialDense(32, input_shape=(16,)))
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(SerialDense(32))
Arguments:
units: Positive integer, dimensionality of the output space.
    serialization_factor: An integer indicating the number of smaller matrix
      multiplies this operation is broken up into. Must divide the dimension
      along which the operation is serialized.
    serialization_dimension: A string, must be one of `input_columns`,
      `input_rows_kernel_columns` or `kernel_rows`. Indicates the dimension
      along which the operation is serialized.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
N-D tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
N-D tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
def __init__(self,
units,
serialization_factor,
serialization_dimension,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super().__init__(
activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
self.serialization_factor = int(serialization_factor)
self.serialization_dimension = serialization_dimension
self.units = int(units) if not isinstance(units, int) else units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.supports_masking = True
self.input_spec = InputSpec(min_ndim=2)
def build(self, input_shape):
dtype = dtypes.as_dtype(self.dtype or K.floatx())
if not (dtype.is_floating or dtype.is_complex):
raise TypeError('Unable to build `SerialDense` layer with non-floating '
'point dtype %s' % (dtype,))
input_shape = tensor_shape.TensorShape(input_shape)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError('The last dimension of the inputs to `SerialDense` '
'should be defined. Found `None`.')
last_dim = tensor_shape.dimension_value(input_shape[-1])
self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
self.kernel = self.add_weight('kernel',
shape=[last_dim, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = self.add_weight('bias',
shape=[
self.units,
],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
self.built = True
def call(self, inputs, **kwargs):
"""
Args:
inputs: The tensor to apply the dense weights to.
Returns:
The tensor resulting from applying the dense weights.
"""
if K.is_sparse(inputs):
raise TypeError(
'Unable to build `SerialDense` layer with sparse inputs.')
if self.serialization_factor < 1:
raise ValueError(
'serialization_factor has to be at least 1, but was {}.'.format(
self.serialization_factor))
inputs = math_ops.cast(inputs, self._compute_dtype)
# Transform the dimension name.
serialization_dimension = self.serialization_dimension
if serialization_dimension == "input_columns":
serialization_dimension = "a_columns"
elif serialization_dimension == "input_rows_kernel_columns":
serialization_dimension = "a_rows_b_columns"
elif serialization_dimension == "kernel_rows":
serialization_dimension = "b_rows"
else:
raise ValueError('Invalid serialization_dimension={}, expected one of: '
'\'input_columns\', \'input_rows_kernel_columns\', '
'\'kernel_rows\'.'.format(serialization_dimension))
outputs = ipu_math_ops.serialized_matmul(inputs, self.kernel,
self.serialization_factor,
serialization_dimension)
if self.use_bias:
outputs = nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
return input_shape[:-1].concatenate(self.units)
def get_config(self):
config = {
'units': self.units,
'serialization_factor': self.serialization_factor,
'serialization_dimension': self.serialization_dimension,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
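# Minimal usage sketch (illustrative assumption, not part of this module):
# `serialization_factor` must divide the serialized dimension, e.g. with
# units=64 and serialization_dimension='kernel_rows' a factor of 4 works
# because 4 divides 64 (the `n` dimension of the kernel).
#
#   layer = SerialDense(units=64,
#                       serialization_factor=4,
#                       serialization_dimension='kernel_rows',
#                       activation='relu')
#   y = layer(tf.ones([8, 128]))  # -> shape [8, 64]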
|
the-stack_0_8783 | import copy
from django.conf import settings
from django.contrib import messages
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.views.decorators.http import require_GET
from django.views.generic import View
from memoized import memoized
from corehq.apps.accounting.decorators import always_allow_project_access
from corehq.apps.ota.rate_limiter import restore_rate_limiter
from dimagi.utils.web import get_ip, json_request, json_response
from corehq import feature_previews, privileges, toggles
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.domain.calculations import (
CALC_FNS,
CALC_ORDER,
CALCS,
dom_calc,
)
from corehq.apps.domain.decorators import (
domain_admin_required,
login_and_domain_required,
login_required,
require_superuser,
)
from corehq.apps.domain.forms import DomainInternalForm, TransferDomainForm
from corehq.apps.domain.models import Domain, TransferDomainRequest
from corehq.apps.domain.views.settings import (
BaseAdminProjectSettingsView,
BaseProjectSettingsView,
)
from corehq.apps.hqwebapp.decorators import use_jquery_ui, use_multiselect
from corehq.apps.hqwebapp.tasks import send_html_email_async, send_mail_async
from corehq.apps.hqwebapp.views import BasePageView
from corehq.apps.receiverwrapper.rate_limiter import submission_rate_limiter
from corehq.apps.toggle_ui.views import ToggleEditView
from corehq.apps.users.models import CouchUser
class BaseInternalDomainSettingsView(BaseProjectSettingsView):
strict_domain_fetching = True
@method_decorator(always_allow_project_access)
@method_decorator(login_and_domain_required)
@method_decorator(require_superuser)
def dispatch(self, request, *args, **kwargs):
return super(BaseInternalDomainSettingsView, self).dispatch(request, *args, **kwargs)
@property
def main_context(self):
context = super(BaseInternalDomainSettingsView, self).main_context
context.update({
'project': self.domain_object,
})
return context
@property
def page_name(self):
return mark_safe("%s <small>Internal</small>" % self.page_title)
class EditInternalDomainInfoView(BaseInternalDomainSettingsView):
urlname = 'domain_internal_settings'
page_title = ugettext_lazy("Project Information")
template_name = 'domain/internal_settings.html'
strict_domain_fetching = True
@method_decorator(always_allow_project_access)
@method_decorator(login_and_domain_required)
@method_decorator(require_superuser)
@use_jquery_ui # datepicker
@use_multiselect
def dispatch(self, request, *args, **kwargs):
return super(BaseInternalDomainSettingsView, self).dispatch(request, *args, **kwargs)
@property
@memoized
def internal_settings_form(self):
can_edit_eula = toggles.CAN_EDIT_EULA.enabled(self.request.couch_user.username)
if self.request.method == 'POST':
return DomainInternalForm(self.request.domain, can_edit_eula, self.request.POST)
initial = {
'countries': self.domain_object.deployment.countries,
'is_test': self.domain_object.is_test,
'use_custom_auto_case_update_hour': 'Y' if self.domain_object.auto_case_update_hour else 'N',
'auto_case_update_hour': self.domain_object.auto_case_update_hour,
'use_custom_auto_case_update_limit': 'Y' if self.domain_object.auto_case_update_limit else 'N',
'auto_case_update_limit': self.domain_object.auto_case_update_limit,
'use_custom_odata_feed_limit': 'Y' if self.domain_object.odata_feed_limit else 'N',
'odata_feed_limit': self.domain_object.odata_feed_limit,
'granted_messaging_access': self.domain_object.granted_messaging_access,
}
internal_attrs = [
'sf_contract_id',
'sf_account_id',
'initiative',
'self_started',
'area',
'sub_area',
'organization_name',
'notes',
'phone_model',
'commtrack_domain',
'performance_threshold',
'experienced_threshold',
'amplifies_workers',
'amplifies_project',
'data_access_threshold',
'business_unit',
'workshop_region',
'partner_technical_competency',
'support_prioritization',
'gs_continued_involvement',
'technical_complexity',
'app_design_comments',
'training_materials',
'partner_comments',
'partner_contact',
'dimagi_contact',
]
if can_edit_eula:
internal_attrs += [
'custom_eula',
'can_use_data',
]
for attr in internal_attrs:
val = getattr(self.domain_object.internal, attr)
if isinstance(val, bool):
val = 'true' if val else 'false'
initial[attr] = val
return DomainInternalForm(self.request.domain, can_edit_eula, initial=initial)
@property
def page_context(self):
return {
'project': self.domain_object,
'form': self.internal_settings_form,
'areas': dict([(a["name"], a["sub_areas"]) for a in settings.INTERNAL_DATA["area"]]),
}
def send_handoff_email(self):
partner_contact = self.internal_settings_form.cleaned_data['partner_contact']
dimagi_contact = self.internal_settings_form.cleaned_data['dimagi_contact']
recipients = [partner_contact, dimagi_contact]
params = {'contact_name': CouchUser.get_by_username(dimagi_contact).human_friendly_name}
send_html_email_async.delay(
subject="Project Support Transition",
recipient=recipients,
html_content=render_to_string(
"domain/email/support_handoff.html", params),
text_content=render_to_string(
"domain/email/support_handoff.txt", params),
email_from=settings.SUPPORT_EMAIL,
)
messages.success(self.request,
_("Sent hand-off email to {}.").format(" and ".join(recipients)))
def post(self, request, *args, **kwargs):
if self.internal_settings_form.is_valid():
old_attrs = copy.copy(self.domain_object.internal)
self.internal_settings_form.save(self.domain_object)
eula_props_changed = (bool(old_attrs.custom_eula) != bool(self.domain_object.internal.custom_eula) or
bool(old_attrs.can_use_data) != bool(self.domain_object.internal.can_use_data))
if eula_props_changed and settings.EULA_CHANGE_EMAIL:
message = '\n'.join([
'{user} changed either the EULA or data sharing properties for domain {domain}.',
'',
'The properties changed were:',
'- Custom eula: {eula_old} --> {eula_new}',
'- Can use data: {can_use_data_old} --> {can_use_data_new}'
]).format(
user=self.request.couch_user.username,
domain=self.domain,
eula_old=old_attrs.custom_eula,
eula_new=self.domain_object.internal.custom_eula,
can_use_data_old=old_attrs.can_use_data,
can_use_data_new=self.domain_object.internal.can_use_data,
)
send_mail_async.delay(
'Custom EULA or data use flags changed for {}'.format(self.domain),
message, settings.DEFAULT_FROM_EMAIL, [settings.EULA_CHANGE_EMAIL]
)
messages.success(request,
_("The internal information for project %s was successfully updated!") % self.domain)
if self.internal_settings_form.cleaned_data['send_handoff_email']:
self.send_handoff_email()
return redirect(self.urlname, self.domain)
else:
messages.error(request, _(
"Your settings are not valid, see below for errors. Correct them and try again!"))
return self.get(request, *args, **kwargs)
class EditInternalCalculationsView(BaseInternalDomainSettingsView):
urlname = 'domain_internal_calculations'
page_title = ugettext_lazy("Calculated Properties")
template_name = 'domain/internal_calculations.html'
@method_decorator(always_allow_project_access)
@method_decorator(login_and_domain_required)
@method_decorator(require_superuser)
def dispatch(self, request, *args, **kwargs):
return super(BaseInternalDomainSettingsView, self).dispatch(request, *args, **kwargs)
@property
def page_context(self):
return {
'calcs': CALCS,
'order': CALC_ORDER,
}
@method_decorator(always_allow_project_access, name='dispatch')
@method_decorator(require_superuser, name='dispatch')
class FlagsAndPrivilegesView(BaseAdminProjectSettingsView):
urlname = 'feature_flags_and_privileges'
page_title = ugettext_lazy("Feature Flags and Privileges")
template_name = 'domain/admin/flags_and_privileges.html'
def _get_toggles(self):
def _sort_key(toggle):
return (not (toggle['domain_enabled'] or toggle['user_enabled']),
toggle['tag_index'],
toggle['label'])
unsorted_toggles = [{
'slug': toggle.slug,
'label': toggle.label,
'description': toggle.description,
'help_link': toggle.help_link,
'tag': toggle.tag.name,
'tag_index': toggle.tag.index,
'tag_description': toggle.tag.description,
'tag_css_class': toggle.tag.css_class,
'has_domain_namespace': toggles.NAMESPACE_DOMAIN in toggle.namespaces,
'domain_enabled': toggle.enabled(self.domain, namespace=toggles.NAMESPACE_DOMAIN),
'user_enabled': toggle.enabled(self.request.couch_user.username,
namespace=toggles.NAMESPACE_USER),
} for toggle in toggles.all_toggles()]
return sorted(unsorted_toggles, key=_sort_key)
def _get_privileges(self):
return sorted([
(privileges.Titles.get_name_from_privilege(privilege),
domain_has_privilege(self.domain, privilege))
for privilege in privileges.MAX_PRIVILEGES
], key=lambda name_has: (not name_has[1], name_has[0]))
@property
def page_context(self):
return {
'toggles': self._get_toggles(),
'use_sql_backend': self.domain_object.use_sql_backend,
'privileges': self._get_privileges(),
}
@method_decorator(always_allow_project_access, name='dispatch')
@method_decorator(require_superuser, name='dispatch')
class ProjectLimitsView(BaseAdminProjectSettingsView):
urlname = 'internal_project_limits_summary'
page_title = ugettext_lazy("Project Limits")
template_name = 'domain/admin/project_limits.html'
@property
def page_context(self):
return get_project_limits_context([
('Submission Rate Limits', submission_rate_limiter),
('Restore Rate Limits', restore_rate_limiter),
], self.domain)
def get_project_limits_context(name_limiter_tuple_list, scope=None):
return {
'project_limits': [
(name, _get_rate_limits(scope, rate_limiter))
for (name, rate_limiter) in name_limiter_tuple_list
]
}
def _get_rate_limits(scope, rate_limiter):
return [
{'key': key, 'current_usage': int(current_usage), 'limit': int(limit),
'percent_usage': round(100 * current_usage / limit, 1)}
for key, current_usage, limit in rate_limiter.iter_rates(scope)
]
class TransferDomainView(BaseAdminProjectSettingsView):
urlname = 'transfer_domain_view'
page_title = ugettext_lazy("Transfer Project")
template_name = 'domain/admin/transfer_domain.html'
@property
@memoized
def active_transfer(self):
return TransferDomainRequest.get_active_transfer(self.domain,
self.request.user.username)
@property
@memoized
def transfer_domain_form(self):
return TransferDomainForm(self.domain,
self.request.user.username,
self.request.POST or None)
def get(self, request, *args, **kwargs):
if self.active_transfer:
self.template_name = 'domain/admin/transfer_domain_pending.html'
if request.GET.get('resend', None):
self.active_transfer.send_transfer_request()
messages.info(request,
_("Resent transfer request for project '{domain}'").format(domain=self.domain))
return super(TransferDomainView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
form = self.transfer_domain_form
if form.is_valid():
# Initiate domain transfer
transfer = form.save()
transfer.send_transfer_request()
return HttpResponseRedirect(self.page_url)
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
@property
def page_context(self):
if self.active_transfer:
return {'transfer': self.active_transfer.as_dict()}
else:
return {'form': self.transfer_domain_form}
@method_decorator(domain_admin_required)
def dispatch(self, request, *args, **kwargs):
if not toggles.TRANSFER_DOMAIN.enabled(request.domain):
raise Http404()
return super(TransferDomainView, self).dispatch(request, *args, **kwargs)
class ActivateTransferDomainView(BasePageView):
urlname = 'activate_transfer_domain'
page_title = 'Activate Domain Transfer'
template_name = 'domain/activate_transfer_domain.html'
@property
@memoized
def active_transfer(self):
return TransferDomainRequest.get_by_guid(self.guid)
@property
def page_context(self):
if self.active_transfer:
return {'transfer': self.active_transfer.as_dict()}
else:
return {}
@property
def page_url(self):
return self.request.get_full_path()
def get(self, request, guid, *args, **kwargs):
self.guid = guid
if (self.active_transfer and
self.active_transfer.to_username != request.user.username and
not request.user.is_superuser):
return HttpResponseRedirect(reverse("no_permissions"))
return super(ActivateTransferDomainView, self).get(request, *args, **kwargs)
def post(self, request, guid, *args, **kwargs):
self.guid = guid
if not self.active_transfer:
raise Http404()
if self.active_transfer.to_username != request.user.username and not request.user.is_superuser:
return HttpResponseRedirect(reverse("no_permissions"))
self.active_transfer.transfer_domain(ip=get_ip(request))
messages.success(request, _("Successfully transferred ownership of project '{domain}'")
.format(domain=self.active_transfer.domain))
return HttpResponseRedirect(reverse('dashboard_default', args=[self.active_transfer.domain]))
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ActivateTransferDomainView, self).dispatch(*args, **kwargs)
class DeactivateTransferDomainView(View):
def post(self, request, guid, *args, **kwargs):
transfer = TransferDomainRequest.get_by_guid(guid)
if not transfer:
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
if (transfer.to_username != request.user.username and
transfer.from_username != request.user.username and
not request.user.is_superuser):
return HttpResponseRedirect(reverse("no_permissions"))
transfer.active = False
transfer.save()
referer = request.META.get('HTTP_REFERER', '/')
# Do not want to send them back to the activate page
if referer.endswith(reverse('activate_transfer_domain', args=[guid])):
messages.info(request,
_("Declined ownership of project '{domain}'").format(domain=transfer.domain))
return HttpResponseRedirect('/')
else:
return HttpResponseRedirect(referer)
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(DeactivateTransferDomainView, self).dispatch(*args, **kwargs)
@login_and_domain_required
@require_superuser
@require_GET
def toggle_diff(request, domain):
params = json_request(request.GET)
other_domain = params.get('domain')
diff = []
if Domain.get_by_name(other_domain):
diff = [{
'slug': t.slug,
'label': t.label,
'url': reverse(ToggleEditView.urlname, args=[t.slug]),
'tag_name': _('Preview'),
'tag_css_class': 'default',
'tag_index': -1,
} for t in feature_previews.all_previews() if _can_copy_toggle(t, request.domain, other_domain)]
diff.extend([{
'slug': t.slug,
'label': t.label,
'url': reverse(ToggleEditView.urlname, args=[t.slug]),
'tag_name': t.tag.name,
'tag_css_class': t.tag.css_class,
'tag_index': t.tag.index,
} for t in toggles.all_toggles() if _can_copy_toggle(t, request.domain, other_domain)])
diff.sort(key=lambda x: (x['tag_index'], x['label']))
return json_response(diff)
def _can_copy_toggle(toggle, domain, other_domain):
return (
toggle.enabled(domain, toggles.NAMESPACE_DOMAIN)
and not toggle.enabled(other_domain, toggles.NAMESPACE_DOMAIN)
)
@login_and_domain_required
@require_superuser
def calculated_properties(request, domain):
calc_tag = request.GET.get("calc_tag", '').split('--')
extra_arg = calc_tag[1] if len(calc_tag) > 1 else ''
calc_tag = calc_tag[0]
if not calc_tag or calc_tag not in list(CALC_FNS):
data = {"error": 'This tag does not exist'}
else:
data = {"value": dom_calc(calc_tag, domain, extra_arg)}
return json_response(data)
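# Illustrative request format (the tag names below are assumptions, not actual CALC_FNS keys):
# a query string of calc_tag=cases--active is split on '--' and dispatched as
# dom_calc('cases', domain, 'active'); a bare calc_tag=forms calls dom_calc('forms', domain, '').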
|
the-stack_0_8786 | from .triangle_metric import *
from .triangle_condition_metric import *
import torch
class TriangleShapeMetric(TriangleMetric):
def __init__(self):
super(TriangleShapeMetric, self).__init__(
name='Triangle Shape',
dimension='1',
acceptable_range=Range(min=0.25, max=1),
normal_range=Range(min=0, max=1),
full_range=Range(min=0, max=1),
q_for_unit=1,
)
def eval(self, P, T):
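        # The shape metric is computed as the reciprocal of the condition metric.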
q = TriangleConditionMetric().eval(P, T)
return torch.reciprocal(q)
|
the-stack_0_8788 | from flask import Flask, Response, request
import requests
import random
app = Flask(__name__)
@app.route('/chance', methods=['GET'])
def chance():
# Gets a shot
shot_response = requests.get("http://service-2:5001/shooter")
    shot = shot_response.text
# Gets the dive
dive_response = requests.get("http://service-3:5002/goalie")
    dive = dive_response.text
# Gets shot_dive
shot_dive = shot + "-" + dive
chance = None
if shot_dive == "Left-Left":
chance = "90%"
elif shot_dive == "Left-Middle":
chance = "100%"
elif shot_dive == "Left-Right":
chance = "44%"
elif shot_dive == "Middle-Left":
chance = "81%"
elif shot_dive == "Middle-Middle":
chance = "0%"
elif shot_dive == "Middle-Right":
chance = "89%"
elif shot_dive == "Right-Left":
chance = "63%"
elif shot_dive == "Right-Middle":
chance = "100%"
elif shot_dive == "Right-Right":
chance = "94%"
return Response(chance, mimetype="text/plain")
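# Minimal sketch for running this service standalone; port 5000 is an assumption
# (the shooter and goalie services above use 5001 and 5002), and a real deployment
# would more likely use a WSGI server instead.
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)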
|
the-stack_0_8789 | # -*- coding: utf-8 -*-
from maya import mel
from maya import cmds
from . import lang
from . import common
import os
import json
import re
class WeightCopyPaste():
def main(self, skinMeshes, mode='copy', saveName='default', method='index', weightFile='auto',
threshold=0.2, engine='maya', tgt=1, path='default', viewmsg=False):
if viewmsg:
cmds.inViewMessage( amg='<hl>Simple Weight</hl> : '+mode, pos='midCenterTop', fade=True, ta=0.75, a=0.5)
        '''
        Save/load function for skin weight data.
        mode -> whether to copy or paste: 'copy' or 'paste'
        saveName -> folder name for the saved weight data; set it to separate data per tool or model
        method -> how to paste: 'index', 'nearest', 'barycentric' or 'over'
        The 'index' method maps weights onto the object by vertex index. It is the most convenient
        method when the target object and the exported data share the same topology.
        The 'nearest' method finds the nearest vertex in the loaded data and uses its weight value.
        It is best suited to mapping a high-resolution mesh onto a low-resolution mesh.
        The 'barycentric' method is only supported for polygon meshes. It finds the nearest triangle
        on the target geometry and rescales the weights by the distance between the source point and
        the vertices. It is normally used for coarse meshes mapped onto high-resolution meshes.
        The 'over' method is like 'index', but the target mesh weights are not cleared before mapping,
        so weights on unmatched indices are kept as they are.
        nearest and barycentric are currently unusable because of a bug (the process never finishes), as of 2016/11/03.
        -> barycentric and bilinear are available from Maya 2016 Extension 2.
        weightFile -> path to specify the file manually instead of searching by mesh name;
        meant to be used together with the nearest/barycentric methods.
        -> Note that specifying a file name when copying prevents saving multiple meshes at once.
        threshold -> position search range for nearest/barycentric
        '''
self.skinMeshes = skinMeshes
self.saveName = saveName
self.method = method
self.weightFile = weightFile
self.threshold = threshold
self.engine = engine
self.memShapes = {}
self.target = tgt
self.pasteMode = {'index':1, 'nearest':3}
        # Convert to a list if it is not already a list
if not isinstance(self.skinMeshes, list):
temp = self.skinMeshes
self.skinMeshes = []
self.skinMeshes.append(temp)
        # Build the file path in advance
if path == 'default':
self.filePath = os.getenv('MAYA_APP_DIR') + os.sep +'Scripting_Files'+ os.sep + 'weight' + os.sep + self.saveName
elif path == 'project':
self.scene_path = os.sep.join(cmds.file(q=True, sceneName=True).split(os.sep)[:-1])
self.protect_path = os.path.join(self.scene_path, 'weight_protector')
try:
if not os.path.exists(self.protect_path):
os.makedirs(self.protect_path)
except Exception as e:
print(e.message)
return
            self.filePath = self.protect_path + os.sep + self.saveName
self.fileName = os.path.join(self.filePath, self.saveName + '.json')
self.apiName = os.path.join(self.filePath, self.saveName + '.skn')
        # Call copy or paste accordingly
if mode == 'copy':
self.weightCopy()
if mode == 'paste':
self.weightPaste()
def weightPaste(self):
dummy = cmds.spaceLocator()
for skinMesh in self.skinMeshes:
            # Decide which save file name to read; 'auto' means use the mesh name
if self.weightFile == 'auto':
weightFile = skinMesh
else:
weightFile = self.weightFile
dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
            # If there is no skin cluster, bind it using the information gathered in advance
if not dstSkinCluster:
meshName = str(weightFile).replace('|', '__pipe__')
if os.path.exists(self.fileName):
try:
                        with open(self.fileName, 'r') as f:  # open the file ('r' = read mode, 'w' = write mode)
                            saveData = json.load(f)  # load the saved data
                            # self.visibility = saveData['visibility']  # read from the saved data
skinningMethod = saveData[';skinningMethod']
dropoffRate = saveData[';dropoffRate']
maintainMaxInfluences = saveData[';maintainMaxInfluences']
maxInfluences = saveData[';maxInfluences']
bindMethod = saveData[';bindMethod']
normalizeWeights = saveData[';normalizeWeights']
influences = saveData[';influences']
                            # If the child nodes are transforms, parent them to the dummy to move them out of the way
common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut')
influences = cmds.ls(influences, l=True, tr=True)
                            # Bind
dstSkinCluster = cmds.skinCluster(
skinMesh,
influences,
omi=maintainMaxInfluences,
mi=maxInfluences,
dr=dropoffRate,
sm=skinningMethod,
nw=normalizeWeights,
tsb=True,
)
dstSkinCluster = dstSkinCluster[0]
                            # Restore the original parenting
common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent')
                        tempSkinNode = skinMesh  # keep the node that has the skin cluster so its parent can be retrieved later
except Exception as e:
print(e.message)
print('Error !! Skin bind failed : ' + skinMesh)
continue
else:
dstSkinCluster = dstSkinCluster[0]
                tempSkinNode = skinMesh  # keep the node that has the skin cluster so its parent can be retrieved later
if self.engine == 'maya':
files = os.listdir(self.filePath)
print(files)
if len(files) == 2:
for file in files:
name, ext = os.path.splitext(file)
if ext == '.xml':
xml_name = file
else:
                    # '|' (pipe) cannot be used in a file name, so convert it
                    meshName = str(weightFile).replace('|', '__pipe__')
                    # ':' (colon) cannot be used in a file name, so convert it
                    meshName = str(meshName).replace(':', '__colon__')
xml_name = meshName + '.xml'
if os.path.isfile(self.filePath + os.sep + xml_name):
if self.method == 'index' or self.method == 'over':
cmds.deformerWeights(xml_name,
im=True,
method=self.method,
deformer=dstSkinCluster,
path=self.filePath + os.sep)
else:
cmds.deformerWeights(xml_name,
im=True,
deformer=dstSkinCluster,
method=self.method,
worldSpace=True,
positionTolerance=self.threshold,
path=self.filePath + os.sep)
cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True)
print('Weight paste to : ' + str(skinMesh))
else:
print('Not exist seved weight XML file : ' + skinMesh)
        # Delete the dummy parent
        cmds.delete(dummy)
        cmds.select(self.skinMeshes, r=True)
    # Function that saves the weight information
def weightCopy(self):
saveData = {}
        # Create the save directory if it does not exist
        if not os.path.exists(self.filePath):
            os.makedirs(os.path.dirname(self.filePath + os.sep))  # note that the trailing separator is required
        else:  # if it already exists, delete its contents
files = os.listdir(self.filePath)
if files is not None:
for file in files:
os.remove(self.filePath + os.sep + file)
skinFlag = False
all_influences = []
for skinMesh in self.skinMeshes:
try:
cmds.bakePartialHistory(skinMesh, ppt=True)
except:
pass
            # Get the skin cluster from the node history (fixed: the node directly above inMesh is not always the skinCluster)
            srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
            if not srcSkinCluster:
                continue  # if there is no skin cluster, move on to the next mesh
            tempSkinNode = skinMesh  # keep the node that has the skin cluster so its parent can be retrieved later
            # Gather the various skin cluster parameters
srcSkinCluster = srcSkinCluster[0]
skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')
dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')
maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')
maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')
bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')
normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')
influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)
saveData[';skinningMethod'] = skinningMethod
saveData[';dropoffRate'] = dropoffRate
saveData[';maintainMaxInfluences'] = maintainMaxInfluences
saveData[';maxInfluences'] = maxInfluences
saveData[';bindMethod'] = bindMethod
saveData[';normalizeWeights'] = normalizeWeights
all_influences += influences
#saveData[';influences'] = influences
skinFlag = True
all_influences = list(set(all_influences))
saveData[';influences'] = all_influences
        # Add the influences in advance so the copy survives changes in the influence count
for skinMesh in self.skinMeshes:
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
if not srcSkinCluster:
                continue  # if there is no skin cluster, skip to the next iteration of the for loop
srcSkinCluster = srcSkinCluster[0]
influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)
sub_influences = list(set(all_influences) - set(influences))
if sub_influences:
cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True, ug=True, wt=0, ps=0)
if self.engine == 'maya':
                # Decide which save file name to use; 'auto' means use the mesh name
if self.weightFile == 'auto':
weightFile = skinMesh
else:
weightFile = self.weightFile
                # '|' (pipe) cannot be used in a file name, so convert it
                meshName = str(weightFile).replace('|', '__pipe__')
                # ':' (colon) cannot be used in a file name, so convert it
                meshName = str(meshName).replace(':', '__colon__')
cmds.deformerWeights(meshName + '.xml', export=True, deformer=srcSkinCluster, path=self.filePath + os.sep)
        with open(self.fileName, 'w') as f:  # open the file ('r' = read mode, 'w' = write mode)
json.dump(saveData, f)
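# Illustrative usage (a sketch, not part of the original tool): copy weights from the
# selected skin meshes, then paste them back later under the same save name.
# WeightCopyPaste().main(cmds.ls(sl=True, l=True), mode='copy', saveName='sample')
# WeightCopyPaste().main(cmds.ls(sl=True, l=True), mode='paste', saveName='sample')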
def transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True):
    '''
    Skin weight transfer function.
    If the destination object is not bound, it is bound automatically from the source bind information.
    Arguments:
    skinMesh -> source mesh (one mesh; a list is also accepted)
    transferedMesh -> destination meshes (list, several allowed; a single object also works)
    transferWeight -> whether to transfer the weights. Optional, defaults to True
    logTransfer -> whether to print log messages
    returnInfluences -> whether to return the bound influence information. Optional, defaults to False
    '''
massege01 = lang.Lang(
en=': It does not perform the transfer of weight because it is not a skin mesh.',
ja=u': スキンメッシュではないのでウェイトの転送を行いません'
).output()
massege02 = lang.Lang(
en='Transfer the weight:',
ja=u'ウェイトを転送:'
).output()
massege03 = lang.Lang(
en='Transfer bind influences:',
ja=u'バインド状態を転送:'
).output()
    if isinstance(skinMesh, list):  # if the source is a list, take only the first mesh
        skinMesh = skinMesh[0]  # safeguard for when a list is passed in
    # Get the skin cluster from the node history (fixed: the node directly above inMesh is not always the skinCluster)
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
# srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True, d=False)
if not srcSkinCluster:
if logTransfer:
print(skinMesh + massege01)
        return False  # exit the function if there is no skin cluster
    # Gather the various skin cluster parameters
srcSkinCluster = srcSkinCluster[0]
skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')
dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')
maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')
maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')
bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')
normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')
    influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)  # the q flag is query mode; e would be edit mode
    # Convert to a list if it is not already a list
if not isinstance(transferedMesh, list):
temp = transferedMesh
transferedMesh = []
transferedMesh.append(temp)
for dst in transferedMesh:
        # Prepare a dummy parent used to shelter the child nodes
dummy = common.TemporaryReparent().main(mode='create')
common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut')
shapes = cmds.listRelatives(dst, s=True, pa=True, type='mesh')
        if not shapes:  # if there is no mesh
            continue  # stop processing and move on to the next object
        # Check whether a skin cluster exists
dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster')
        # If there is no skin cluster, bind it using the information gathered in advance
        if not dstSkinCluster:
            # Bind
dstSkinCluster = cmds.skinCluster(
dst,
influences,
omi=maintainMaxInfluences,
mi=maxInfluences,
dr=dropoffRate,
sm=skinningMethod,
nw=normalizeWeights,
tsb=True,
)
if logTransfer:
print(massege03 + '[' + skinMesh + '] >>> [' + dst + ']')
dstSkinCluster = dstSkinCluster[0]
if transferWeight:
cmds.copySkinWeights(
ss=srcSkinCluster,
ds=dstSkinCluster,
surfaceAssociation='closestPoint',
influenceAssociation=['name', 'closestJoint', 'oneToOne'],
normalize=True,
noMirror=True
)
if logTransfer:
print(massege02 + '[' + skinMesh + '] >>> [' + dst + ']')
        # Restore the original parenting
common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent')
        # Delete the dummy parent
common.TemporaryReparent().main(dummyParent=dummy, mode='delete')
if returnInfluences:
return influences
else:
return True
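# Illustrative usage (the object names are assumptions): transfer the bind and the
# weights from one source mesh onto several target meshes.
# transfer_weight('body_geo', ['body_lod1_geo', 'body_lod2_geo'], transferWeight=True)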
def symmetry_weight(srcNode=None, dstNode=None, symWeight=True):
    '''
    Function that mirrors the skin weights symmetrically.
    srcNode -> mirror source
    dstNode -> mirror destination
    symWeight -> whether to mirror the weights
    '''
    # Get the skin cluster
if srcNode is None:
return
srcShapes = cmds.listRelatives(srcNode, s=True, pa=True, type='mesh')
if srcShapes:
srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster')
        # If a skin cluster exists, set the joint labels and mirror the weights
if srcSkinCluster:
            # Transfer the bind state: get the joints and label them
            skinJointAll = cmds.skinCluster(srcSkinCluster, q=True, inf=True)  # get the joints
            for skinJoint in skinJointAll:
                # Call the joint label setup function
joint_label(skinJoint, visibility=False)
if symWeight is False or dstNode is None:
return
transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True)
dstShapes = cmds.listRelatives(dstNode, s=True, pa=True, type='mesh')
dstSkinCluster = cmds.listConnections(dstShapes[0] + '.inMesh', s=True, d=False)
cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0],
mirrorMode='YZ', surfaceAssociation='closestComponent',
influenceAssociation='label', normalize=True)
def load_joint_label_rules():
    # Default values used when the rules cannot be loaded
start_l_list = ['L_', 'l_', 'Left_', 'left_']
start_r_list = ['R_', 'r_', 'Right_', 'right_']
mid_l_list = ['_L_', '_l_', '_Left_', '_left_']
mid_r_list = ['_R_', '_r_', '_Right_', '_right_']
end_l_list = ['_L', '_l', '_L.', '_l.', '_L..', '_l..', '_Left', '_left']
end_r_list = ['_R', '_r', '_R.', '_r.', '_R..', '_r..', '_Right', '_right']
def_left_list_list = [start_l_list, mid_l_list, end_l_list]
def_right_list_list = [start_r_list, mid_r_list, end_r_list]
    # Load the rules from the left/right symmetry settings file
dir_path = os.path.join(
os.getenv('MAYA_APP_DIR'),
'Scripting_Files')
start_file = dir_path+os.sep+'joint_rule_start.json'
middle_file = dir_path+os.sep+'joint_rule_middle.json'
end_file = dir_path+os.sep+'joint_rule_end.json'
save_files = [start_file, middle_file, end_file]
left_list_list = []
right_list_list = []
for i, save_file in enumerate(save_files):
        if os.path.exists(save_file):  # if the save file exists
try:
with open(save_file, 'r') as f:
save_data = json.load(f)
l_list = save_data.keys()
r_list = save_data.values()
left_list_list.append(l_list)
right_list_list.append(r_list)
except Exception as e:
print(e.message)
left_list_list.append(def_left_list_list[i])
right_list_list.append(def_right_list_list[i])
else:
left_list_list.append(def_left_list_list[i])
right_list_list.append(def_right_list_list[i])
return left_list_list, right_list_list
def joint_label(object, visibility=False):
    '''
    Joint label setup function.
    object -> object(s); a list is also accepted
    visibility -> label visibility, optional. Defaults to False.
    '''
    # Load the labelling rules beforehand
left_list_list, right_list_list = load_joint_label_rules()
    # Convert to a list if it is not already a list
if not isinstance(object, list):
temp = object
object = []
object.append(temp)
for skinJoint in object:
objTypeName = cmds.objectType(skinJoint)
if objTypeName == 'joint':
split_name = skinJoint.split('|')[-1]
            # Determine whether the skeleton name contains an L/R marker
side = 0
side_name = ''
for i, (l_list, r_list) in enumerate(zip(left_list_list, right_list_list)):
for j, lr_list in enumerate([l_list, r_list]):
for k, lr in enumerate(lr_list):
if i == 0:
if re.match(lr, split_name):
side = j + 1
if i == 1:
if re.search(lr, split_name):
side = j + 1
if i == 2:
if re.match(lr[::-1], split_name[::-1]):
side = j + 1
                        if side:  # if a match was found, break out of all the loops
side_name = lr
break
if side:
break
if side:
break
#print('joint setting :', split_name, side, side_name)
            # Set the left/right label; use centre when it is neither
            cmds.setAttr(skinJoint + '.side', side)
            # Set the label type to "Other"
            cmds.setAttr(skinJoint + '.type', 18)
            new_joint_name = split_name.replace(side_name.replace('.', ''), '')
            # Set the skeleton name
            cmds.setAttr(skinJoint + '.otherType', new_joint_name, type='string')
            # Set the visibility
cmds.setAttr(skinJoint + '.drawLabel', visibility)
else:
print(str(skinJoint) + ' : ' + str(objTypeName) + ' Skip Command')
# Toggle muting of the skin weights
def toggle_mute_skinning():
msg01 = lang.Lang(
en='No mesh selection.\nWould you like to process all of mesh in this scene?.',
ja=u'選択メッシュがありません。\nシーン内のすべてのメッシュを処理しますか?').output()
msg02 = lang.Lang(en='Yes', ja=u'はい').output()
msg03 = lang.Lang(en='No', ja=u'いいえ').output()
msg04 = lang.Lang(
en='Skinning is disabled',
ja=u'スキニングは無効になりました') .output()
msg05 = lang.Lang(
en='Skinning is enabled',
ja=u'スキニングが有効になりました') .output()
cmds.selectMode(o=True)
objects = cmds.ls(sl=True, l=True)
ad_node = []
for node in objects:
children = cmds.ls(cmds.listRelatives(node, ad=True, f=True), type ='transform')
ad_node += [node]+children
#print(len(ad_node))
objects = set(ad_node)
#print(len(objects))
if not objects:
all_mesh = cmds.confirmDialog(m=msg01, t='', b= [msg02, msg03], db=msg02, cb=msg03, icn='question',ds=msg03)
if all_mesh == msg02:
objects = cmds.ls(type='transform')
if not objects:
return
mute_flag = 1
skin_list = []
for node in objects:
skin = cmds.ls(cmds.listHistory(node), type='skinCluster')
if not skin:
continue
skin_list.append(skin)
if cmds.getAttr(skin[0]+'.envelope') > 0:
mute_flag = 0
for skin in skin_list:
cmds.setAttr(skin[0]+'.envelope', mute_flag)
if mute_flag == 0:
cmds.confirmDialog(m=msg04)
if mute_flag == 1:
cmds.confirmDialog(m=msg05)
|
the-stack_0_8790 |
# Write a program that reads a young person's year of birth
# and reports, according to their age,
# whether they still have to enlist for military service,
# whether it is time to enlist,
# or whether the enlistment deadline has already passed.
# The program must also show how much time is left
# or how long past the deadline they are.
from datetime import date
anoNascimento=int(input("Enter the year you were born (4 digits): "))
anoAtual=date.today().year
idade=anoAtual-anoNascimento
if idade>18:
saldo=idade-18
    print("\nYou are {} years old in {}. You must go to the Military Service Board (JSM). You should have enlisted {} year(s) ago.".format(idade,anoAtual,saldo))
ano=anoAtual-saldo
    print("You should have enlisted in the year {}.".format(ano))
elif idade<18:
saldo=18-idade
    print("\nYou are {} years old in {} and do NOT have to enlist yet. {} year(s) left until your enlistment.".format(idade,anoAtual,saldo))
ano=anoAtual+saldo
    print("You must enlist in {}.".format(ano))
else:
    print("\nYou are {} years old in {}. You MUST do your military enlistment THIS YEAR.".format(idade,anoAtual))
|
the-stack_0_8792 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from akg.utils import kernel_exec as utils
import numpy as np
from akg.ops.array import tile
from tests.common.tensorio import compare_tensor
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
def tile_execute(shape, dtype, multiples, attrs):
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = tile_compile(shape, dtype, multiples, attrs, kernel_name=kernel_name, tuning=t)
if t:
exp_output, inputs, output = gen_data(dtype, multiples, shape)
return mod, exp_output, (inputs, output)
else:
return mod
else:
mod = tile_compile(shape, dtype, multiples, attrs)
exp_output, inputs, output = gen_data(dtype, multiples, shape)
acu_output = utils.mod_launch(mod, [inputs, output], expect=exp_output)
rtol, atol = get_rtol_atol("tile", dtype)
return inputs, acu_output, exp_output, compare_tensor(acu_output, exp_output, rtol=rtol, atol=atol, equal_nan=True)
def gen_data(dtype, multiples, shape):
inputs = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
exp_output = np.tile(inputs, multiples)
output = np.full(exp_output.shape, np.nan, dtype)
return exp_output, inputs, output
def tile_compile(shape, dtype, multiples, attrs, kernel_name="tile", tuning=False):
return utils.op_build_test(tile.tile, [shape], [dtype], [multiples], kernel_name=kernel_name, attrs=attrs, tuning=tuning)
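# Illustrative usage (a sketch; the empty attrs dict is an assumption, real test cases
# pass target/tuning options): build the kernel and compare it against numpy.tile.
# inputs, actual, expected, ok = tile_execute((4, 3), 'float16', (2, 2), {})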
|
the-stack_0_8795 | # NOTE: bad django practice but /ee specifically depends on /posthog so it should be fine
from datetime import timedelta
from typing import Any, Dict, List, Optional, Tuple
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from rest_framework import serializers
from rest_framework.decorators import action
from rest_framework.request import Request
from rest_framework.response import Response
from ee.clickhouse.client import sync_execute
from ee.clickhouse.models.action import format_action_filter, format_entity_filter
from ee.clickhouse.models.cohort import format_filter_query
from ee.clickhouse.models.person import ClickhousePersonSerializer
from ee.clickhouse.models.property import parse_prop_clauses
from ee.clickhouse.queries.util import get_trunc_func_ch, parse_timestamps
from ee.clickhouse.sql.person import GET_LATEST_PERSON_SQL, PEOPLE_SQL, PEOPLE_THROUGH_DISTINCT_SQL, PERSON_TREND_SQL
from ee.clickhouse.sql.stickiness.stickiness_people import STICKINESS_PEOPLE_SQL
from posthog.api.action import ActionSerializer, ActionViewSet
from posthog.api.utils import get_target_entity
from posthog.constants import ENTITY_ID, ENTITY_TYPE, TREND_FILTER_TYPE_ACTIONS
from posthog.models.action import Action
from posthog.models.cohort import Cohort
from posthog.models.entity import Entity
from posthog.models.filters import Filter
from posthog.models.filters.stickiness_filter import StickinessFilter
from posthog.models.property import Property
from posthog.models.team import Team
class ClickhouseActionSerializer(ActionSerializer):
is_calculating = serializers.SerializerMethodField()
def get_count(self, action: Action) -> Optional[int]:
if self.context.get("view") and self.context["view"].action != "list":
query, params = format_action_filter(action)
if query == "":
return None
return sync_execute(
"SELECT count(1) FROM events WHERE team_id = %(team_id)s AND {}".format(query),
{"team_id": action.team_id, **params},
)[0][0]
return None
def get_is_calculating(self, action: Action) -> bool:
return False
class ClickhouseActionsViewSet(ActionViewSet):
serializer_class = ClickhouseActionSerializer
# Don't calculate actions in Clickhouse as it's on the fly
def _calculate_action(self, action: Action) -> None:
pass
def list(self, request: Request, *args: Any, **kwargs: Any) -> Response:
actions = self.get_queryset()
actions_list: List[Dict[Any, Any]] = self.serializer_class(actions, many=True, context={"request": request}).data # type: ignore
return Response({"results": actions_list})
@action(methods=["GET"], detail=False)
def people(self, request: Request, *args: Any, **kwargs: Any) -> Response:
team = self.team
filter = Filter(request=request)
entity = get_target_entity(request)
# adhoc date handling. parsed differently with django orm
date_from = filter.date_from or timezone.now()
data = {}
if filter.interval == "month":
data.update(
{"date_to": (date_from + relativedelta(months=1) - timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S")}
)
elif filter.interval == "week":
data.update({"date_to": (date_from + relativedelta(weeks=1)).strftime("%Y-%m-%d %H:%M:%S")})
elif filter.interval == "hour":
data.update({"date_to": date_from + timedelta(hours=1)})
elif filter.interval == "minute":
data.update({"date_to": date_from + timedelta(minutes=1)})
filter = Filter(data={**filter._data, **data})
current_url = request.get_full_path()
serialized_people = self._calculate_entity_people(team, entity, filter)
current_url = request.get_full_path()
next_url: Optional[str] = request.get_full_path()
offset = filter.offset
if len(serialized_people) > 100 and next_url:
if "offset" in next_url:
next_url = next_url[1:]
next_url = next_url.replace("offset=" + str(offset), "offset=" + str(offset + 100))
else:
next_url = request.build_absolute_uri(
"{}{}offset={}".format(next_url, "&" if "?" in next_url else "?", offset + 100)
)
else:
next_url = None
return Response(
            {
                "results": [{"people": serialized_people[0:100], "count": len(serialized_people[0:100])}],
"next": next_url,
"previous": current_url[1:],
}
)
def _calculate_entity_people(self, team: Team, entity: Entity, filter: Filter):
parsed_date_from, parsed_date_to, _ = parse_timestamps(filter=filter, team_id=team.pk)
entity_sql, entity_params = format_entity_filter(entity=entity)
person_filter = ""
person_filter_params: Dict[str, Any] = {}
if filter.breakdown_type == "cohort" and filter.breakdown_value != "all":
cohort = Cohort.objects.get(pk=filter.breakdown_value, team_id=team.pk)
person_filter, person_filter_params = format_filter_query(cohort)
person_filter = "AND distinct_id IN ({})".format(person_filter)
elif (
filter.breakdown_type == "person"
and isinstance(filter.breakdown, str)
and isinstance(filter.breakdown_value, str)
):
person_prop = Property(**{"key": filter.breakdown, "value": filter.breakdown_value, "type": "person"})
filter.properties.append(person_prop)
prop_filters, prop_filter_params = parse_prop_clauses(filter.properties, team.pk)
params: Dict = {"team_id": team.pk, **prop_filter_params, **entity_params, "offset": filter.offset}
content_sql = PERSON_TREND_SQL.format(
entity_filter=f"AND {entity_sql}",
parsed_date_from=parsed_date_from,
parsed_date_to=parsed_date_to,
filters=prop_filters,
breakdown_filter="",
person_filter=person_filter,
)
people = sync_execute(
PEOPLE_THROUGH_DISTINCT_SQL.format(
content_sql=content_sql, latest_person_sql=GET_LATEST_PERSON_SQL.format(query="")
),
{**params, **person_filter_params},
)
serialized_people = ClickhousePersonSerializer(people, many=True).data
return serialized_people
class LegacyClickhouseActionsViewSet(ClickhouseActionsViewSet):
legacy_team_compatibility = True
|
the-stack_0_8796 | """
Gateway for Binance Crypto Exchange.
"""
import urllib
import hashlib
import hmac
import time
from copy import copy
from datetime import datetime, timedelta
from enum import Enum
from threading import Lock
import pytz
from vnpy.api.rest import RestClient, Request
from vnpy.api.websocket import WebsocketClient
from vnpy.trader.constant import (
Direction,
Exchange,
Product,
Status,
OrderType,
Interval
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
AccountData,
ContractData,
BarData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
from vnpy.trader.event import EVENT_TIMER
from vnpy.event import Event
REST_HOST = "https://www.binance.com"
WEBSOCKET_TRADE_HOST = "wss://stream.binance.com:9443/ws/"
WEBSOCKET_DATA_HOST = "wss://stream.binance.com:9443/stream?streams="
STATUS_BINANCE2VT = {
"NEW": Status.NOTTRADED,
"PARTIALLY_FILLED": Status.PARTTRADED,
"FILLED": Status.ALLTRADED,
"CANCELED": Status.CANCELLED,
"REJECTED": Status.REJECTED
}
ORDERTYPE_VT2BINANCE = {
OrderType.LIMIT: "LIMIT",
OrderType.MARKET: "MARKET"
}
ORDERTYPE_BINANCE2VT = {v: k for k, v in ORDERTYPE_VT2BINANCE.items()}
DIRECTION_VT2BINANCE = {
Direction.LONG: "BUY",
Direction.SHORT: "SELL"
}
DIRECTION_BINANCE2VT = {v: k for k, v in DIRECTION_VT2BINANCE.items()}
INTERVAL_VT2BINANCE = {
Interval.MINUTE: "1m",
Interval.HOUR: "1h",
Interval.DAILY: "1d",
}
TIMEDELTA_MAP = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
Interval.DAILY: timedelta(days=1),
}
CHINA_TZ = pytz.timezone("Asia/Shanghai")
class Security(Enum):
NONE = 0
SIGNED = 1
API_KEY = 2
symbol_name_map = {}
class BinanceGateway(BaseGateway):
"""
VN Trader Gateway for Binance connection.
"""
default_setting = {
"key": "",
"secret": "",
"session_number": 3,
"proxy_host": "",
"proxy_port": 0,
}
exchanges = [Exchange.BINANCE]
def __init__(self, event_engine):
"""Constructor"""
super().__init__(event_engine, "BINANCE")
self.trade_ws_api = BinanceTradeWebsocketApi(self)
self.market_ws_api = BinanceDataWebsocketApi(self)
self.rest_api = BinanceRestApi(self)
def connect(self, setting: dict):
""""""
key = setting["key"]
secret = setting["secret"]
session_number = setting["session_number"]
proxy_host = setting["proxy_host"]
proxy_port = setting["proxy_port"]
self.rest_api.connect(key, secret, session_number,
proxy_host, proxy_port)
self.market_ws_api.connect(proxy_host, proxy_port)
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
def subscribe(self, req: SubscribeRequest):
""""""
self.market_ws_api.subscribe(req)
def send_order(self, req: OrderRequest):
""""""
return self.rest_api.send_order(req)
def cancel_order(self, req: CancelRequest):
""""""
self.rest_api.cancel_order(req)
def query_account(self):
""""""
pass
def query_position(self):
""""""
pass
def query_history(self, req: HistoryRequest):
""""""
return self.rest_api.query_history(req)
def close(self):
""""""
self.rest_api.stop()
self.trade_ws_api.stop()
self.market_ws_api.stop()
def process_timer_event(self, event: Event):
""""""
self.rest_api.keep_user_stream()
class BinanceRestApi(RestClient):
"""
BINANCE REST API
"""
def __init__(self, gateway: BinanceGateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.trade_ws_api = self.gateway.trade_ws_api
self.key = ""
self.secret = ""
self.user_stream_key = ""
self.keep_alive_count = 0
self.recv_window = 5000
self.time_offset = 0
self.order_count = 1_000_000
self.order_count_lock = Lock()
self.connect_time = 0
def sign(self, request):
"""
Generate BINANCE signature.
"""
security = request.data["security"]
if security == Security.NONE:
request.data = None
return request
if request.params:
path = request.path + "?" + urllib.parse.urlencode(request.params)
else:
request.params = dict()
path = request.path
if security == Security.SIGNED:
timestamp = int(time.time() * 1000)
if self.time_offset > 0:
timestamp -= abs(self.time_offset)
elif self.time_offset < 0:
timestamp += abs(self.time_offset)
request.params["timestamp"] = timestamp
query = urllib.parse.urlencode(sorted(request.params.items()))
signature = hmac.new(self.secret, query.encode(
"utf-8"), hashlib.sha256).hexdigest()
query += "&signature={}".format(signature)
path = request.path + "?" + query
request.path = path
request.params = {}
request.data = {}
# Add headers
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "application/json",
"X-MBX-APIKEY": self.key
}
if security in [Security.SIGNED, Security.API_KEY]:
request.headers = headers
return request
def connect(
self,
key: str,
secret: str,
session_number: int,
proxy_host: str,
proxy_port: int
):
"""
Initialize connection to REST server.
"""
self.key = key
self.secret = secret.encode()
self.proxy_port = proxy_port
self.proxy_host = proxy_host
self.connect_time = (
int(datetime.now(CHINA_TZ).strftime("%y%m%d%H%M%S")) * self.order_count
)
self.init(REST_HOST, proxy_host, proxy_port)
self.start(session_number)
        self.gateway.write_log("REST API started successfully")
self.query_time()
self.query_account()
self.query_order()
self.query_contract()
self.start_user_stream()
def query_time(self):
""""""
data = {
"security": Security.NONE
}
path = "/api/v1/time"
return self.add_request(
"GET",
path,
callback=self.on_query_time,
data=data
)
def query_account(self):
""""""
data = {"security": Security.SIGNED}
self.add_request(
method="GET",
path="/api/v3/account",
callback=self.on_query_account,
data=data
)
def query_order(self):
""""""
data = {"security": Security.SIGNED}
self.add_request(
method="GET",
path="/api/v3/openOrders",
callback=self.on_query_order,
data=data
)
def query_contract(self):
""""""
data = {
"security": Security.NONE
}
self.add_request(
method="GET",
path="/api/v1/exchangeInfo",
callback=self.on_query_contract,
data=data
)
def _new_order_id(self):
""""""
with self.order_count_lock:
self.order_count += 1
return self.order_count
def send_order(self, req: OrderRequest):
""""""
orderid = "NKD8FYX4-" + str(self.connect_time + self._new_order_id())
order = req.create_order_data(
orderid,
self.gateway_name
)
self.gateway.on_order(order)
data = {
"security": Security.SIGNED
}
params = {
"symbol": req.symbol.upper(),
"timeInForce": "GTC",
"side": DIRECTION_VT2BINANCE[req.direction],
"type": ORDERTYPE_VT2BINANCE[req.type],
"price": str(req.price),
"quantity": str(req.volume),
"newClientOrderId": orderid,
"newOrderRespType": "ACK"
}
self.add_request(
method="POST",
path="/api/v3/order",
callback=self.on_send_order,
data=data,
params=params,
extra=order,
on_error=self.on_send_order_error,
on_failed=self.on_send_order_failed
)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
""""""
data = {
"security": Security.SIGNED
}
params = {
"symbol": req.symbol.upper(),
"origClientOrderId": req.orderid
}
self.add_request(
method="DELETE",
path="/api/v3/order",
callback=self.on_cancel_order,
params=params,
data=data,
extra=req
)
def start_user_stream(self):
""""""
data = {
"security": Security.API_KEY
}
self.add_request(
method="POST",
path="/api/v1/userDataStream",
callback=self.on_start_user_stream,
data=data
)
def keep_user_stream(self):
""""""
self.keep_alive_count += 1
if self.keep_alive_count < 600:
return
self.keep_alive_count = 0
data = {
"security": Security.API_KEY
}
params = {
"listenKey": self.user_stream_key
}
self.add_request(
method="PUT",
path="/api/v1/userDataStream",
callback=self.on_keep_user_stream,
params=params,
data=data
)
def on_query_time(self, data, request):
""""""
local_time = int(time.time() * 1000)
server_time = int(data["serverTime"])
self.time_offset = local_time - server_time
def on_query_account(self, data, request):
""""""
for account_data in data["balances"]:
account = AccountData(
accountid=account_data["asset"],
balance=float(account_data["free"]) + float(account_data["locked"]),
frozen=float(account_data["locked"]),
gateway_name=self.gateway_name
)
if account.balance:
self.gateway.on_account(account)
        self.gateway.write_log("Account balance query succeeded")
def on_query_order(self, data, request):
""""""
for d in data:
order = OrderData(
orderid=d["clientOrderId"],
symbol=d["symbol"].lower(),
exchange=Exchange.BINANCE,
price=float(d["price"]),
volume=float(d["origQty"]),
type=ORDERTYPE_BINANCE2VT[d["type"]],
direction=DIRECTION_BINANCE2VT[d["side"]],
traded=float(d["executedQty"]),
status=STATUS_BINANCE2VT.get(d["status"], None),
datetime=generate_datetime(d["time"]),
gateway_name=self.gateway_name,
)
self.gateway.on_order(order)
        self.gateway.write_log("Order information query succeeded")
def on_query_contract(self, data, request):
""""""
for d in data["symbols"]:
base_currency = d["baseAsset"]
quote_currency = d["quoteAsset"]
name = f"{base_currency.upper()}/{quote_currency.upper()}"
pricetick = 1
min_volume = 1
for f in d["filters"]:
if f["filterType"] == "PRICE_FILTER":
pricetick = float(f["tickSize"])
elif f["filterType"] == "LOT_SIZE":
min_volume = float(f["stepSize"])
contract = ContractData(
symbol=d["symbol"].lower(),
exchange=Exchange.BINANCE,
name=name,
pricetick=pricetick,
size=1,
min_volume=min_volume,
product=Product.SPOT,
history_data=True,
gateway_name=self.gateway_name,
)
self.gateway.on_contract(contract)
symbol_name_map[contract.symbol] = contract.name
        self.gateway.write_log("Contract information query succeeded")
def on_send_order(self, data, request):
""""""
pass
def on_send_order_failed(self, status_code: str, request: Request):
"""
Callback when sending order failed on server.
"""
order = request.extra
order.status = Status.REJECTED
self.gateway.on_order(order)
        msg = f"Order submission failed, status code: {status_code}, message: {request.response.text}"
self.gateway.write_log(msg)
def on_send_order_error(
self, exception_type: type, exception_value: Exception, tb, request: Request
):
"""
Callback when sending order caused exception.
"""
order = request.extra
order.status = Status.REJECTED
self.gateway.on_order(order)
# Record exception if not ConnectionError
if not issubclass(exception_type, ConnectionError):
self.on_error(exception_type, exception_value, tb, request)
def on_cancel_order(self, data, request):
""""""
pass
def on_start_user_stream(self, data, request):
""""""
self.user_stream_key = data["listenKey"]
self.keep_alive_count = 0
url = WEBSOCKET_TRADE_HOST + self.user_stream_key
self.trade_ws_api.connect(url, self.proxy_host, self.proxy_port)
def on_keep_user_stream(self, data, request):
""""""
pass
def query_history(self, req: HistoryRequest):
""""""
history = []
limit = 1000
start_time = int(datetime.timestamp(req.start))
while True:
# Create query params
params = {
"symbol": req.symbol.upper(),
"interval": INTERVAL_VT2BINANCE[req.interval],
"limit": limit,
"startTime": start_time * 1000, # convert to millisecond
}
# Add end time if specified
if req.end:
end_time = int(datetime.timestamp(req.end))
params["endTime"] = end_time * 1000 # convert to millisecond
# Get response from server
resp = self.request(
"GET",
"/api/v1/klines",
data={"security": Security.NONE},
params=params
)
# Break if request failed with other status code
if resp.status_code // 100 != 2:
                msg = f"Failed to fetch history data, status code: {resp.status_code}, message: {resp.text}"
self.gateway.write_log(msg)
break
else:
data = resp.json()
if not data:
                    msg = f"Fetched empty history data, start time: {start_time}"
self.gateway.write_log(msg)
break
buf = []
for l in data:
bar = BarData(
symbol=req.symbol,
exchange=req.exchange,
datetime=generate_datetime(l[0]),
interval=req.interval,
volume=float(l[5]),
open_price=float(l[1]),
high_price=float(l[2]),
low_price=float(l[3]),
close_price=float(l[4]),
gateway_name=self.gateway_name
)
buf.append(bar)
history.extend(buf)
begin = buf[0].datetime
end = buf[-1].datetime
                msg = f"Fetched history data, {req.symbol} - {req.interval.value}, {begin} - {end}"
self.gateway.write_log(msg)
# Break if total data count less than limit (latest date collected)
if len(data) < limit:
break
# Update start time
start_dt = bar.datetime + TIMEDELTA_MAP[req.interval]
start_time = int(datetime.timestamp(start_dt))
return history
class BinanceTradeWebsocketApi(WebsocketClient):
""""""
def __init__(self, gateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
def connect(self, url, proxy_host, proxy_port):
""""""
self.init(url, proxy_host, proxy_port)
self.start()
def on_connected(self):
""""""
        self.gateway.write_log("Trade Websocket API connected")
def on_packet(self, packet: dict): # type: (dict)->None
""""""
if packet["e"] == "outboundAccountInfo":
self.on_account(packet)
elif packet["e"] == "executionReport":
self.on_order(packet)
def on_account(self, packet):
""""""
for d in packet["B"]:
account = AccountData(
accountid=d["a"],
balance=float(d["f"]) + float(d["l"]),
frozen=float(d["l"]),
gateway_name=self.gateway_name
)
if account.balance:
self.gateway.on_account(account)
def on_order(self, packet: dict):
""""""
if packet["C"] == "":
orderid = packet["c"]
else:
orderid = packet["C"]
order = OrderData(
symbol=packet["s"].lower(),
exchange=Exchange.BINANCE,
orderid=orderid,
type=ORDERTYPE_BINANCE2VT[packet["o"]],
direction=DIRECTION_BINANCE2VT[packet["S"]],
price=float(packet["p"]),
volume=float(packet["q"]),
traded=float(packet["z"]),
status=STATUS_BINANCE2VT[packet["X"]],
datetime=generate_datetime(packet["O"]),
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
# Push trade event
trade_volume = float(packet["l"])
if not trade_volume:
return
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=packet["t"],
direction=order.direction,
price=float(packet["L"]),
volume=trade_volume,
datetime=generate_datetime(packet["T"]),
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
class BinanceDataWebsocketApi(WebsocketClient):
""""""
def __init__(self, gateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.ticks = {}
def connect(self, proxy_host: str, proxy_port: int):
""""""
self.proxy_host = proxy_host
self.proxy_port = proxy_port
def on_connected(self):
""""""
        self.gateway.write_log("Market data Websocket API connection refreshed")
def subscribe(self, req: SubscribeRequest):
""""""
if req.symbol not in symbol_name_map:
            self.gateway.write_log(f"Contract symbol not found: {req.symbol}")
return
# Create tick buf data
tick = TickData(
symbol=req.symbol,
name=symbol_name_map.get(req.symbol, ""),
exchange=Exchange.BINANCE,
datetime=datetime.now(CHINA_TZ),
gateway_name=self.gateway_name,
)
self.ticks[req.symbol] = tick
# Close previous connection
if self._active:
self.stop()
self.join()
# Create new connection
channels = []
for ws_symbol in self.ticks.keys():
channels.append(ws_symbol + "@ticker")
channels.append(ws_symbol + "@depth5")
url = WEBSOCKET_DATA_HOST + "/".join(channels)
self.init(url, self.proxy_host, self.proxy_port)
self.start()
def on_packet(self, packet):
""""""
stream = packet["stream"]
data = packet["data"]
symbol, channel = stream.split("@")
tick = self.ticks[symbol]
if channel == "ticker":
tick.volume = float(data['v'])
tick.open_price = float(data['o'])
tick.high_price = float(data['h'])
tick.low_price = float(data['l'])
tick.last_price = float(data['c'])
tick.datetime = generate_datetime(float(data['E']))
else:
bids = data["bids"]
for n in range(5):
price, volume = bids[n]
tick.__setattr__("bid_price_" + str(n + 1), float(price))
tick.__setattr__("bid_volume_" + str(n + 1), float(volume))
asks = data["asks"]
for n in range(5):
price, volume = asks[n]
tick.__setattr__("ask_price_" + str(n + 1), float(price))
tick.__setattr__("ask_volume_" + str(n + 1), float(volume))
if tick.last_price:
self.gateway.on_tick(copy(tick))
def generate_datetime(timestamp: float) -> datetime:
""""""
dt = datetime.fromtimestamp(timestamp / 1000)
dt = dt.replace(tzinfo=CHINA_TZ)
return dt
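# Illustrative connection sketch (placeholder credentials, not a working setup):
# gateway = BinanceGateway(event_engine)
# gateway.connect({"key": "...", "secret": "...", "session_number": 3,
#                  "proxy_host": "", "proxy_port": 0})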
|
the-stack_0_8798 | """Example minimal input plugin for the Mjolnir-Config-Template."""
# Local imports
import brokkr.pipeline.baseinput
class ExampleMinimalInput(brokkr.pipeline.baseinput.ValueInputStep):
def __init__(
self,
example_argument=True,
**value_input_kwargs):
super().__init__(binary_decoder=False, **value_input_kwargs)
# YOUR INIT LOGIC AND ARGUMENT HANDLING HERE
self._example_attribute = example_argument
def read_raw_data(self, input_data=None):
# YOUR DATA READING LOGIC HERE
if not self._example_attribute:
return None
raw_data = []
for data_type in self.data_types:
try:
raw_data_value = data_type.example_value
except Exception as e:
self.logger.error("%s occurred: %s", type(e).__name__, e)
raw_data_value = None
raw_data.append(raw_data_value)
return raw_data
|
the-stack_0_8799 | """
BALLAST: Builder Assistant to Lay out, Label and Arrange Spectra
Together
This is a simple program to combine and display spectra together.
"""
import sys
import os
import re
import argparse
import typing as tp
import configparser as cfg
from math import *
import numpy as np
import matplotlib.pyplot as plt
from estampes.base.spectrum import Spectrum
from estampes.tools.char import convert_expr
from estampes.visual.plotspec import SpecLayout
def fscale(expr: str, var: str) -> tp.Callable[[float], float]:
"""Returns a scaling function.
Analyzes the mathematical expression in `expr` and returns a
function compatible with `var`.
Parameters
----------
expr
Mathematical expression.
var
Variable of interest.
Returns
-------
function
Mathematical function
Raises
------
NameError
Unsupported mathematical function.
"""
try:
_expr = convert_expr(expr, var, natural=True)
except ValueError:
        raise NameError('Wrong mathematical functions detected.')
return eval('lambda {}: {}'.format(var, _expr))
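# Illustrative example (assuming convert_expr accepts plain arithmetic like this):
# f = fscale('2*x + 1', 'x') would return a callable with f(3.0) == 7.0.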
def build_opts(parser: argparse.ArgumentParser) -> tp.NoReturn:
"""Builds commandline options.
Builds commandline options inside input `parser`.
Parameters
----------
parser
Parser to update.
"""
parser.add_argument('optfile', nargs='?',
help='Option file (INI style).')
# parser.add_argument('-o', '--output',
# help='Output file.')
parser.add_argument('-c', '--colors', action='append',
help='Spectral colors.')
msg = '''\
Colors of the spectra. By default, it follows the order of input files.
It is possible to change the order by putting a number followed by ":".
Ex. '3:Test' means that the label 'Test' is for the 3rd file (start at 1).
'r'/'e'/'0' refers to the reference data.
'''
parser.add_argument('-i', '--inpfile', action='append',
help='Input data file.')
msg = '''\
Labels for the legend. By default, it follows the order of input files.
It is possible to change the order by putting a number followed by ":".
Ex. '3:Test' means that the label 'Test' is for the 3rd file (start at 1).
'r'/'e'/'0' refers to the reference data.
'''
parser.add_argument('-l', '--label', action='append',
help=msg)
parser.add_argument('-r', '--refdata',
help='Reference spectrum file.')
def parse_args(args: tp.Sequence[str]) -> argparse.Namespace:
"""Parses arguments.
Parses commandline arguments
Parameters
----------
args
Commandline arguments
Returns
-------
:obj:`argparse.Namespace`
Object holding results as attributes
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
build_opts(parser)
return parser.parse_args(args)
def parse_subid(ident: str, ncols: int = 1
) -> tp.Tuple[tp.Union[int, tp.Tuple[int, int]],
tp.Union[int, tp.Tuple[int, int]]]:
"""Parses a subplot identifier.
Takes a subplot identifier and returns the corresponding row and
column.
Parameters
----------
ident
Identifier string.
ncols
Number of columns.
Returns
-------
int, tuple
Row index (starting from 1) or interval as tuple of integers.
int, tuple
Column index (starting from 1) or interval as tuple of integers.
Raises
------
ValueError
Unable to parse the subplot specification.
"""
def split_coord(coord: str) -> tp.Union[int, tp.Tuple[int, int]]:
"""Splits correctly a single coordinate."""
if not coord.strip():
return 1
else:
res = coord.split('-')
if len(res) == 2:
if not res[0].strip():
i = 1
else:
i = max(int(res[0]), 1)
if not res[1].strip():
j = -1
else:
j = max(int(res[1]), 1)
if i == j:
return i
else:
return (i, j)
else:
return max(int(res[0]), 1)
grid = ident.split(',')
if len(grid) == 2:
row = split_coord(grid[0])
col = split_coord(grid[1])
elif len(grid) == 1:
        i = int(grid[0])
row = max(int(ceil(i/ncols)), 1)
col = max(i - (row-1)*ncols, 1)
else:
raise ValueError('Incorrect subplot specification.')
return row, col
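# Illustrative examples: parse_subid('2,3') -> (2, 3); parse_subid('1-2,1') -> ((1, 2), 1);
# with ncols=2, parse_subid('5', 2) -> (3, 1).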
def parse_inifile(fname: str
) -> tp.Tuple[tp.Dict[str, tp.Any],
tp.List[tp.List[SpecLayout]],
tp.Dict[str, tp.Any]]:
"""Parses INI file.
Parses a INI configuration file.
Parameters
----------
fname
Filename.
Returns
-------
dict
Figure data.
list
List of lists of spectrum layout parameters (grif format).
dict
Curves data.
Raises
------
FileNotFoundError
INI file or input file missing.
ValueError
Incorrect parameter.
"""
if not os.path.exists(fname):
raise FileNotFoundError('Missing INI file')
opts = cfg.ConfigParser()
opts.read(fname)
secs = {key.strip().lower(): key for key in opts.sections()}
figdat = {
'title': None,
'geom': None,
'shareaxes': False,
'subp': (1, 1),
'fname': None,
'show': True
}
if 'figure' in secs:
optsec = opts[secs['figure']]
figdat['title'] = optsec.get('maintitle', fallback=None)
figdat['fname'] = optsec.get('imagefile', fallback=None)
figdat['show'] = optsec.getboolean('showfigure', fallback=True)
res = optsec.get('mergeaxes', fallback='None').lower()
if res == 'none':
val = False
elif res == 'x':
val = 'X'
elif res == 'y':
val = 'Y'
elif res == 'all':
val = True
else:
raise ValueError('Unrecognized value for MergeAxes')
figdat['shareaxes'] = val
optkey = optsec.get('subplots', None)
if optkey is not None:
res = optkey.replace('(', '').replace(')', '').split(',')
if len(res) == 1:
val = (max(int(res[0]), 1), 1)
else:
val = (max(int(res[0]), 1), max(int(res[1]), 1))
else:
val = None
else:
val = None
if val is not None:
figdat['subp'] = val
# nrows and ncols are needed for the subplot specifications,
# they must not be changed
nrows, ncols = figdat['subp']
figdat['nums'] = nrows * ncols
# Check geometry now since it may be proportional to number of rows/cols
if 'figure' in secs:
optkey = optsec.get('geometry', None)
if optkey is not None:
res = optkey.replace('(', '').replace(')', '').split(',')
if len(res) == 1:
raise ValueError('Incorrect value for geometry.')
if '*' in res[0] or 'x' in res[0]:
val1 = float(res[0].replace('x', '').replace('*', ''))*ncols
else:
val1 = float(res[0])
if '*' in res[1] or 'x' in res[1]:
val2 = float(res[1].replace('x', '').replace('*', ''))*nrows
else:
val2 = float(res[1])
val = (val1, val2)
figdat['geom'] = val
spcdat = []
for _ in range(nrows):
spcdat.append([None for j in range(ncols)])
# The layout system works in a slightly different way than curves
# Besides using defaults, users can use the generic [layout] to define
# a common layout.
# We first build some default setup, which will be used for all others.
# The keys correspond to SpecLayout
spckeys = {
'title': ('title', ),
'xleft': ('xleft', 'xmin'),
'xright': ('xright', 'xmax'),
'ytop': ('ytop', 'ymax'),
'ybottom': ('ybottom', 'ymin'),
'xscale': ('xscale', ),
'yscale': ('yscale', ),
'xlabel': ('xlabel', ),
'ylabel': ('ylabel', ),
'legpos': ('legend', ),
'legcol': ('legend_cols', ),
'plottag': ('panel', )
}
spcbase = {
'title': None,
'xleft': None,
'xright': None,
'ytop': None,
'ybottom': None,
'xscale': 'linear',
'yscale': 'linear',
'xlabel': None,
'ylabel': None,
'legpos': 'best',
'legcol': 1,
'plottag': None
}
if 'layout' in secs:
optsec = opts[secs['layout']]
for key in spckeys:
for alias in spckeys[key]:
if alias in optsec:
spcbase[key] = optsec[alias]
break
for sec in secs:
if sec.startswith('layout'):
res = sec.split(':')
if len(res) == 2: # Ignore default case here
row, col = parse_subid(res[1], ncols)
if isinstance(row, tuple) or isinstance(col, tuple):
msg = 'Subplot ranges not supported in layout specs.'
raise ValueError(msg)
if row > nrows or col > ncols:
break
# correct to Python indexes
row -= 1
col -= 1
optsec = opts[secs[sec]]
val = {}
for key in spckeys:
for alias in spckeys[key]:
if alias in optsec:
val[key] = optsec[alias]
break
else:
val[key] = spcbase[key]
spcdat[row][col] = SpecLayout(**val)
for row in range(nrows):
for col in range(ncols):
if spcdat[row][col] is None:
spcdat[row][col] = SpecLayout(**spcbase)
    # If axes merged, remove unnecessary labels
if figdat['shareaxes'] in ('Y', True) and ncols > 1:
for i in range(nrows):
for j in range(1, ncols):
spcdat[i][j].ylabel = None
if figdat['shareaxes'] in ('X', True) and nrows > 1:
for i in range(nrows-1):
for j in range(ncols):
spcdat[i][j].xlabel = None
curves = {}
for sec in secs:
if sec.startswith('curve'):
res = sec.split(':', maxsplit=1)
if res[0] != 'curve':
print(sec, 'will be ignored as a curve definition.')
continue # This is not a right keyword, ignore.
if len(res) != 2:
key = ' '
else:
key = secs[sec].split(':', maxsplit=1)[1].strip()
optsec = opts[secs[sec]]
# Check if curve to be shown
if not optsec.getboolean('show', fallback=True):
continue
# Subplot - check if subplot within range
res = optsec.get('subplot', fallback=None)
if res is not None:
val1 = parse_subid(res, ncols)
val = [[None, None], [None, None]]
for i, item in enumerate(val1):
if isinstance(item, int):
val[i] = (item-1, item-1)
else:
val[i][0] = item[0] - 1
if item[1] == -1:
if i == 0:
val[i][1] = nrows - 1
else:
val[i][1] = ncols - 1
else:
val[i][1] = item[1] - 1
row, col = val
if row[-1] >= nrows or col[-1] >= ncols:
continue
curves[key] = {'subplot': (row, col)}
else:
curves[key] = {'subplot': ((0, nrows-1), (0, ncols-1))}
if 'file' not in optsec:
print(f'WARNING: Missing file for "{sec}". Ignoring.')
continue
            elif not os.path.exists(optsec['file']):
                fmt = 'ERROR: File "{}" not found in "{}". Ignoring.'
                print(fmt.format(optsec['file'], sec))
                continue
spc = optsec.get('spectroscopy', fallback=None)
lvl = optsec.get('level', fallback=None)
if spc is None or lvl is None:
raise ValueError('Spectroscopy not defined')
yid = optsec.get('yaxis', None)
if yid is not None:
yid = 'y' + yid
curves[key]['data'] = Spectrum(optsec['file'], spc, lvl, yid)
if optsec.getboolean('broaden', fallback=False):
func = optsec.get('function', None)
hwhm = optsec.getfloat('hwhm', fallback=10.)
xmin = optsec.getfloat('newxmin', fallback=None)
xmax = optsec.getfloat('newxmax', fallback=None)
xres = optsec.getfloat('grain', fallback=4.)
curves[key]['data'].set_broadening(hwhm, func, 'default', xres,
xmin, xmax)
vizdata = {}
for item in ('color', 'linestyle', 'linewidth'):
if optsec.get(item, False):
vizdata[item] = optsec.get(item, False)
if vizdata:
curves[key]['data'].set_display(**vizdata)
if optsec.get('label', None) is not None:
curves[key]['data'].label = optsec.get('label')
curves[key]['xshift'] = optsec.getfloat('xshift', fallback=None)
res = optsec.get('xscale', None)
if res is not None:
data = res.split(',')
try:
curves[key]['xscale'] = fscale(data[-1], 'x')
except NameError:
msg = 'Incorrect scaling factor for X'
raise ValueError(msg) from None
if len(data) > 1:
val = data[0].lower()
if val in ('rel', 'relative'):
curves[key]['xrelscale'] = True
elif val in ('abs', 'absolute'):
curves[key]['xrelscale'] = False
else:
msg = 'Incorrect scaling method for X'
raise ValueError(msg)
else:
curves[key]['xrelscale'] = False
else:
curves[key]['xscale'] = None
res = optsec.get('yshift', None)
if res is not None:
try:
val = float(res)
except ValueError:
if res.lower() in ('base', 'baseline'):
val = 'base'
else:
msg = 'Unsupported value for YShift'
raise ValueError(msg) from None
else:
val = None
curves[key]['yshift'] = val
res = optsec.get('yscale', None)
if res is not None:
data = res.split(',')
try:
curves[key]['yscale'] = fscale(data[-1], 'y')
except NameError:
msg = 'Incorrect scaling factor for Y'
raise ValueError(msg) from None
if len(data) > 1:
val = data[0].lower()
if val in ('rel', 'relative'):
curves[key]['yrelscale'] = True
elif val in ('abs', 'absolute'):
curves[key]['yrelscale'] = False
else:
msg = 'Incorrect scaling method for Y'
raise ValueError(msg)
else:
curves[key]['yrelscale'] = True
else:
curves[key]['yscale'] = None
curves[key]['ynorm'] = optsec.getboolean('normalize',
fallback=False)
if 'outputfile' in optsec:
curves[key]['outfile'] = \
optsec.get('outputfile').format(curve=key)
return figdat, spcdat, curves
def main() -> tp.NoReturn:
"""Main function.
"""
args = parse_args(sys.argv[1:])
if not args.inpfile and not args.optfile:
print('ERROR: Missing files or option file.')
sys.exit(2)
elif args.inpfile and args.optfile:
msg = 'ERROR: Option file and single files cannot be treated' \
+ ' together'
print(msg)
sys.exit(2)
elif args.inpfile:
print('ERROR: Files in input not yet supported')
else:
figdata, spcdata, curves = parse_inifile(args.optfile)
nrows, ncols = figdata['subp']
y0lines = np.full((nrows, ncols), False)
pars = {'tight_layout': True}
res = figdata['shareaxes']
if res == 'X' or res is True:
pars['sharex'] = True
if 'gridspec_kw' not in pars:
pars['gridspec_kw'] = {}
pars['gridspec_kw']['hspace'] = 0.0
if res == 'Y' or res is True:
pars['sharey'] = True
if 'gridspec_kw' not in pars:
pars['gridspec_kw'] = {}
pars['gridspec_kw']['wspace'] = 0.0
fig, subp = plt.subplots(nrows, ncols, **pars)
if figdata['geom'] is not None:
fig.set_size_inches(figdata['geom'])
# Build the curves, one at a time and then include in all relevant
# plot to avoid multiple iterations of heavy operations like broaden.
for idcurve, key in enumerate(curves):
xaxis = np.array(curves[key]['data'].xaxis)
if curves[key]['xscale'] is not None:
if curves[key]['xrelscale']:
shift = min(xaxis, key=lambda x: abs(x))
xaxis -= shift
func = np.vectorize(curves[key]['xscale'])
xaxis = func(xaxis)
if curves[key]['xrelscale']:
xaxis += func(shift)
if curves[key]['xshift'] is not None:
xaxis += curves[key]['xshift']
yaxis = np.array(curves[key]['data'].yaxis)
ymin = np.min(yaxis)
ymax = np.max(yaxis)
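            # Flag curves that cross zero with comparable positive/negative
            # extents; subplots holding such a curve later get a y=0
            # reference line (axhline) drawn behind the data.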
add_y0 = ymin*ymax < 0 and (
min(abs(ymin), ymax)/max(abs(ymin), ymax) > .1)
if curves[key]['yscale'] is not None:
if curves[key]['yrelscale']:
shift = min(yaxis, key=lambda x: abs(x))
yaxis -= shift
func = np.vectorize(curves[key]['yscale'])
yaxis = func(yaxis)
if curves[key]['yrelscale']:
yaxis += func(shift)
if curves[key]['ynorm']:
yshift = min(yaxis, key=lambda x: abs(x))
yaxis -= yshift
ymax = np.max(np.abs(yaxis))
yaxis /= ymax
yaxis += yshift/ymax
if curves[key]['yshift'] is not None:
if curves[key]['yshift'] == 'base':
if ymin*ymax >= 0:
if ymin >= 0:
yshift = - ymin
else:
yshift = + ymax
else:
yshift = 0
else:
yshift = curves[key]['yshift']
yaxis += yshift
stick = curves[key]['data'].get_broadening('func') == 'stick'
if 'outfile' in curves[key]:
fmt = '{:12.5f}, {:15.6e}\n'
with open(curves[key]['outfile'], 'w') as fobj:
for i in range(len(xaxis)):
fobj.write(fmt.format(xaxis[i], yaxis[i]))
data = {}
if curves[key]['data'].label is not None:
data['label'] = curves[key]['data'].label
if curves[key]['data'].linecolor is not None:
data['color'] = curves[key]['data'].linecolor
elif stick:
# stick is done with vertical lines, always black by default
# For this reason, we set a color. Otherwise, let the normal
# plotting tools select automatically.
data['color'] = 'C{:d}'.format(idcurve)
if curves[key]['data'].linewidth is not None:
data['linewidth'] = curves[key]['data'].linewidth
if not stick and curves[key]['data'].linestyle is not None:
data['linestyle'] = curves[key]['data'].linestyle
irow, icol = curves[key]['subplot']
for row in range(irow[0], min(irow[1]+1, nrows)):
for col in range(icol[0], min(icol[1]+1, ncols)):
y0lines[row, col] = y0lines[row, col] or add_y0
if nrows > 1 and ncols > 1:
sub = subp[row, col]
elif nrows > 1:
sub = subp[row]
elif ncols > 1:
sub = subp[col]
else:
sub = subp
if stick:
zeros = np.zeros(len(yaxis))
sub.vlines(xaxis, zeros, yaxis, **data)
else:
sub.plot(xaxis, yaxis, **data)
# Now set the plot grid.
for row in range(nrows):
for col in range(ncols):
if nrows > 1 and ncols > 1:
sub = subp[row, col]
elif nrows > 1:
sub = subp[row]
elif ncols > 1:
sub = subp[col]
else:
sub = subp
sub.legend()
if y0lines[row, col]:
sub.axhline(0, c='.5', zorder=-10.0)
spcdata[row][col].set_plot(sub)
if figdata['title'] is not None:
fig.suptitle(figdata['title'], fontweight='bold')
if figdata['fname'] is not None:
plt.savefig(figdata['fname'], bbox_inches='tight')
if figdata['show']:
plt.show()
if __name__ == '__main__':
main()
|
the-stack_0_8800 | import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Function
from torch.autograd import Variable
from ..box_utils import decode, nms
from data import v2 as cfg
class Detect(Function):
"""At test time, Detect is the final layer of SSD. Decode location preds,
apply non-maximum suppression to location predictions based on conf
scores and threshold to a top_k number of output predictions for both
confidence score and locations.
"""
def __init__(self, num_classes, bkg_label, top_k, conf_thresh, nms_thresh):
self.num_classes = num_classes
self.background_label = bkg_label
self.top_k = top_k
# Parameters used in nms.
self.nms_thresh = nms_thresh
if nms_thresh <= 0:
            raise ValueError('nms_threshold must be positive.')
self.conf_thresh = conf_thresh
self.variance = cfg['variance']
self.output = torch.zeros(1, self.num_classes, self.top_k, 5)
def forward(self, loc_data, conf_data, prior_data):
"""
Args:
loc_data: (tensor) Loc preds from loc layers
Shape: [batch,num_priors*4]
conf_data: (tensor) Shape: Conf preds from conf layers
Shape: [batch*num_priors,num_classes]
prior_data: (tensor) Prior boxes and variances from priorbox layers
Shape: [1,num_priors,4]
"""
num = loc_data.size(0) # batch size
num_priors = prior_data.size(0)
self.output.zero_()
if num == 1:
# size batch x num_classes x num_priors
conf_preds = conf_data.t().contiguous().unsqueeze(0)
else:
conf_preds = conf_data.view(num, num_priors,
self.num_classes).transpose(2, 1)
self.output.expand_(num, self.num_classes, self.top_k, 5)
# Decode predictions into bboxes.
for i in range(num):
decoded_boxes = decode(loc_data[i], prior_data, self.variance)
# For each class, perform nms
conf_scores = conf_preds[i].clone()
num_det = 0
for cl in range(1, self.num_classes):
c_mask = conf_scores[cl].gt(self.conf_thresh)
scores = conf_scores[cl][c_mask]
if scores.size(0) == 0:
continue
l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)
boxes = decoded_boxes[l_mask].view(-1, 4)
# idx of highest scoring and non-overlapping boxes per class
ids, count = nms(boxes, scores, self.nms_thresh, self.top_k)
self.output[i, cl, :count] = \
torch.cat((scores[ids[:count]].unsqueeze(1),
boxes[ids[:count]]), 1)
flt = self.output.view(-1, 5)
_, idx = flt[:, 0].sort(0)
_, rank = idx.sort(0)
flt[(rank >= self.top_k).unsqueeze(1).expand_as(flt)].fill_(0)
return self.output
|
the-stack_0_8802 | import io
import cv2
import discord
from discord.ext import commands
import debubble as db
import scrape
import secret
# Listen for RSS
# Get image from RSS
# Send to discord
bot = commands.Bot(
command_prefix="!",
description=(
"DebubbleBot automatically removes the text from speech bubbles in "
"Aurora. DebubbleBot locates speech bubbles and places a white mask "
"over each bubble it finds to hide the text.\n"
"\n"
"DebubbleBot can output in two modes. The main mode, invoked by the "
"`!debubble` command, produces a mask consisting of white blobs on a "
"transparent background. Placing the mask on top of the original page "
"removes the speech bubbles from the page. This mode makes it easy to "
"correct mistakes DebubbleBot makes. The secondary mode, invoked by "
"the `!overlay` command, writes the mask onto the original page. "
"Given that DebubbleBot sometimes thinks a sound effect or a cloud is "
"a speech bubble, this mode mostly exists for debugging.\n"
"\n"
"DebubbleBot prefers false positives to false negatives: it would "
"rather mask something that isn't actually a speech bubble than miss "
"a bubble by accident. Particularly, DebubbleBot tends to think panel "
"panel borders and clouds are speech bubbles. The rationale behind "
"this decision is that false positives are both easier to spot and "
"faster to fix in image editing software than false negatives."
)
)
@bot.command(help="Check if DebubbleBot is up.")
async def ping(ctx):
"""Ping the bot to check if it's up"""
await ctx.send("Hi!")
@bot.command(help="Produce a mask for a specific comic page.")
async def debubble(ctx, book: int, chapter: int, page: int):
"""
Produce a mask (white blobs on transparency) over a specific comic
page.
"""
await debubbler(ctx, book, chapter, page, True)
@bot.command(help="Directly remove the speech bubbles from a specific comic page. This command mostly exists for debugging.")
async def overlay(ctx, book: int, chapter: int, page: int):
"""Directly remove the speech bubbles from a specific comic page."""
await debubbler(ctx, book, chapter, page, False)
async def debubbler(ctx, book, chapter, page, masking):
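    """Shared helper behind !debubble and !overlay.

    Scrapes page book.chapter.page and runs the debubbler on it; with
    masking=True the result is a transparent mask of white blobs, otherwise
    the mask is drawn directly onto the scraped page.
    """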
async with ctx.typing():
success = scrape.scrape(book, chapter, page)
if success:
mask = db.debubble(
cv2.imread(f"scrape/{book}/{chapter}/{page:0>3}.png"),
masking=masking
)
_, data = cv2.imencode(".png", mask)
            with io.BytesIO(data.tobytes()) as buffer:
await ctx.send(
content=f"Debubbled {book}.{chapter}.{page}",
file=discord.File(buffer, filename="mask.png")
)
else:
await ctx.send(f"Couldn't get page {book}.{chapter}.{page}. Maybe it doesn't exist, maybe I failed to download it. ¯\\_(ツ)_/¯")
bot.run(secret.TOKEN)
|
the-stack_0_8804 | # class AbstractContract is a template for any
# EVM based contract and initializing with contract address and ABI.
# Address and ABI can be found on blockchain explorer such as https://etherscan.io
from abc import ABC
import sys
from web3 import Web3
import decimal
import argparse
# Binance Smart Chain http node provider
BSC = 'https://bsc-dataseed1.binance.org:443'
class AbstractContract(ABC):
provider = None
def __init__(self, address: str, ABI: str):
if self.provider is not None:
self.w3 = Web3(Web3.HTTPProvider(self.provider))
else:
raise ProviderInitException
try:
self.contract = self.w3.eth.contract(address, abi=ABI)
except Exception as e:
print(f'{e} in contract {address}')
@property
def address(self):
return self.contract.address
@property
def abi(self):
return self.contract.abi
def get_functions_list(self) -> list:
return self.contract.all_functions()
class BSCContract(AbstractContract):
provider = BSC
# abi is a large text stored in a txt file
with open('BEP20_abi.txt', 'r') as file:
BEP20_ABI = file.read().replace('\n', '')
def __init__(self, address: str, ABI: str=BEP20_ABI):
if self.provider is not None:
self.w3 = Web3(Web3.HTTPProvider(self.provider))
else:
raise ProviderInitException
try:
self.contract = self.w3.eth.contract(address, abi=ABI)
except Exception as e:
print(f'{e} in contract {address}')
    def get_balance(self, wallet: str) -> decimal.Decimal:
        try:
            balance = self.contract.functions.balanceOf(wallet).call()
            # fromWei(..., 'ether') assumes the token uses 18 decimals
            balance = self.w3.fromWei(balance, 'ether')
        except Exception as e:
            print(f'{e} in contract {self.address}')
            balance = None
        return balance
    @staticmethod
    def get_balance_formatted(balance: decimal.Decimal) -> str:
        try:
            return str(round(balance, 1))
        except Exception:
            return "Balance not found"
# Parse arguments from console
def parse_data() -> str:
parser = argparse.ArgumentParser()
parser.add_argument("--wallet", "-w", help="input wallet")
parser.add_argument("--token", "-t", help="token")
args = parser.parse_args()
return args.wallet, args.token
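# Example invocation (script name and addresses are placeholders, not real
# contracts):
#   python this_script.py --wallet 0xYourWalletAddress --token 0xTokenContractAddress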
if __name__ == "__main__":
    wallet, token = parse_data()
    contract = BSCContract(token)
    balance = BSCContract.get_balance_formatted(contract.get_balance(wallet))
print(balance)
|
the-stack_0_8807 | from __future__ import unicode_literals
import re
import six
import re
from collections import deque
from collections import namedtuple
def get_filter_expression(expr, names, values):
"""
Parse a filter expression into an Op.
Examples
expr = 'Id > 5 AND attribute_exists(test) AND Id BETWEEN 5 AND 6 OR length < 6 AND contains(test, 1) AND 5 IN (4,5, 6) OR (Id < 5 AND 5 > Id)'
expr = 'Id > 5 AND Subs < 7'
"""
parser = ConditionExpressionParser(expr, names, values)
return parser.parse()
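# Illustrative call (the attribute names and values below are made up):
#   get_filter_expression('Id > :min AND begins_with(#n, :p)',
#                         {'#n': 'Name'},
#                         {':min': {'N': '5'}, ':p': {'S': 'foo'}})
# returns an Op tree roughly equivalent to
#   OpAnd(OpGreaterThan(Id, 5), FuncBeginsWith(Name, 'foo'))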
def get_expected(expected):
"""
    Parse an Expected dict (the legacy conditional-write format) into an Op.
    Examples
    expected = {'Id': {'Exists': True}}
    expected = {'Id': {'ComparisonOperator': 'GT', 'AttributeValueList': [{'N': '5'}]}}
"""
ops = {
'EQ': OpEqual,
'NE': OpNotEqual,
'LE': OpLessThanOrEqual,
'LT': OpLessThan,
'GE': OpGreaterThanOrEqual,
'GT': OpGreaterThan,
'NOT_NULL': FuncAttrExists,
'NULL': FuncAttrNotExists,
'CONTAINS': FuncContains,
'NOT_CONTAINS': FuncNotContains,
'BEGINS_WITH': FuncBeginsWith,
'IN': FuncIn,
'BETWEEN': FuncBetween,
}
# NOTE: Always uses ConditionalOperator=AND
conditions = []
for key, cond in expected.items():
path = AttributePath([key])
if 'Exists' in cond:
if cond['Exists']:
conditions.append(FuncAttrExists(path))
else:
conditions.append(FuncAttrNotExists(path))
elif 'Value' in cond:
conditions.append(OpEqual(path, AttributeValue(cond['Value'])))
elif 'ComparisonOperator' in cond:
operator_name = cond['ComparisonOperator']
values = [
AttributeValue(v)
for v in cond.get("AttributeValueList", [])]
OpClass = ops[operator_name]
conditions.append(OpClass(path, *values))
# NOTE: Ignore ConditionalOperator
ConditionalOp = OpAnd
if conditions:
output = conditions[0]
for condition in conditions[1:]:
output = ConditionalOp(output, condition)
else:
return OpDefault(None, None)
return output
class Op(object):
"""
Base class for a FilterExpression operator
"""
OP = ''
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def expr(self, item):
raise NotImplementedError("Expr not defined for {0}".format(type(self)))
def __repr__(self):
return '({0} {1} {2})'.format(self.lhs, self.OP, self.rhs)
# TODO add tests for all of these
EQ_FUNCTION = lambda item_value, test_value: item_value == test_value # flake8: noqa
NE_FUNCTION = lambda item_value, test_value: item_value != test_value # flake8: noqa
LE_FUNCTION = lambda item_value, test_value: item_value <= test_value # flake8: noqa
LT_FUNCTION = lambda item_value, test_value: item_value < test_value # flake8: noqa
GE_FUNCTION = lambda item_value, test_value: item_value >= test_value # flake8: noqa
GT_FUNCTION = lambda item_value, test_value: item_value > test_value # flake8: noqa
COMPARISON_FUNCS = {
'EQ': EQ_FUNCTION,
'=': EQ_FUNCTION,
'NE': NE_FUNCTION,
'!=': NE_FUNCTION,
'LE': LE_FUNCTION,
'<=': LE_FUNCTION,
'LT': LT_FUNCTION,
'<': LT_FUNCTION,
'GE': GE_FUNCTION,
'>=': GE_FUNCTION,
'GT': GT_FUNCTION,
'>': GT_FUNCTION,
# NULL means the value should not exist at all
'NULL': lambda item_value: False,
# NOT_NULL means the value merely has to exist, and values of None are valid
'NOT_NULL': lambda item_value: True,
'CONTAINS': lambda item_value, test_value: test_value in item_value,
'NOT_CONTAINS': lambda item_value, test_value: test_value not in item_value,
'BEGINS_WITH': lambda item_value, test_value: item_value.startswith(test_value),
'IN': lambda item_value, *test_values: item_value in test_values,
'BETWEEN': lambda item_value, lower_test_value, upper_test_value: lower_test_value <= item_value <= upper_test_value,
}
def get_comparison_func(range_comparison):
return COMPARISON_FUNCS.get(range_comparison)
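# For example, get_comparison_func('BETWEEN')(5, 1, 10) evaluates 1 <= 5 <= 10
# and returns True; unknown operator names yield None.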
class RecursionStopIteration(StopIteration):
pass
class ConditionExpressionParser:
def __init__(self, condition_expression, expression_attribute_names,
expression_attribute_values):
self.condition_expression = condition_expression
self.expression_attribute_names = expression_attribute_names
self.expression_attribute_values = expression_attribute_values
def parse(self):
"""Returns a syntax tree for the expression.
The tree, and all of the nodes in the tree are a tuple of
- kind: str
- children/value:
list of nodes for parent nodes
value for leaf nodes
Raises ValueError if the condition expression is invalid
Raises KeyError if expression attribute names/values are invalid
Here are the types of nodes that can be returned.
The types of child nodes are denoted with a colon (:).
An arbitrary number of children is denoted with ...
Condition:
('OR', [lhs : Condition, rhs : Condition])
('AND', [lhs: Condition, rhs: Condition])
('NOT', [argument: Condition])
('PARENTHESES', [argument: Condition])
('FUNCTION', [('LITERAL', function_name: str), argument: Operand, ...])
('BETWEEN', [query: Operand, low: Operand, high: Operand])
('IN', [query: Operand, possible_value: Operand, ...])
('COMPARISON', [lhs: Operand, ('LITERAL', comparator: str), rhs: Operand])
Operand:
('EXPRESSION_ATTRIBUTE_VALUE', value: dict, e.g. {'S': 'foobar'})
('PATH', [('LITERAL', path_element: str), ...])
NOTE: Expression attribute names will be expanded
('FUNCTION', [('LITERAL', 'size'), argument: Operand])
Literal:
('LITERAL', value: str)
"""
if not self.condition_expression:
return OpDefault(None, None)
nodes = self._lex_condition_expression()
nodes = self._parse_paths(nodes)
# NOTE: The docs say that functions should be parsed after
# IN, BETWEEN, and comparisons like <=.
# However, these expressions are invalid as function arguments,
# so it is okay to parse functions first. This needs to be done
# to interpret size() correctly as an operand.
nodes = self._apply_functions(nodes)
nodes = self._apply_comparator(nodes)
nodes = self._apply_in(nodes)
nodes = self._apply_between(nodes)
nodes = self._apply_parens_and_booleans(nodes)
node = nodes[0]
op = self._make_op_condition(node)
return op
class Kind:
"""Enum defining types of nodes in the syntax tree."""
# Condition nodes
# ---------------
OR = 'OR'
AND = 'AND'
NOT = 'NOT'
PARENTHESES = 'PARENTHESES'
FUNCTION = 'FUNCTION'
BETWEEN = 'BETWEEN'
IN = 'IN'
COMPARISON = 'COMPARISON'
# Operand nodes
# -------------
EXPRESSION_ATTRIBUTE_VALUE = 'EXPRESSION_ATTRIBUTE_VALUE'
PATH = 'PATH'
# Literal nodes
# --------------
LITERAL = 'LITERAL'
class Nonterminal:
"""Enum defining nonterminals for productions."""
CONDITION = 'CONDITION'
OPERAND = 'OPERAND'
COMPARATOR = 'COMPARATOR'
FUNCTION_NAME = 'FUNCTION_NAME'
IDENTIFIER = 'IDENTIFIER'
AND = 'AND'
OR = 'OR'
NOT = 'NOT'
BETWEEN = 'BETWEEN'
IN = 'IN'
COMMA = 'COMMA'
LEFT_PAREN = 'LEFT_PAREN'
RIGHT_PAREN = 'RIGHT_PAREN'
WHITESPACE = 'WHITESPACE'
Node = namedtuple('Node', ['nonterminal', 'kind', 'text', 'value', 'children'])
def _lex_condition_expression(self):
nodes = deque()
remaining_expression = self.condition_expression
while remaining_expression:
node, remaining_expression = \
self._lex_one_node(remaining_expression)
if node.nonterminal == self.Nonterminal.WHITESPACE:
continue
nodes.append(node)
return nodes
def _lex_one_node(self, remaining_expression):
# TODO: Handle indexing like [1]
        attribute_regex = r'(:|#)?[A-Za-z0-9\-_]+'
patterns = [(
self.Nonterminal.WHITESPACE, re.compile('^ +')
), (
self.Nonterminal.COMPARATOR, re.compile(
'^('
# Put long expressions first for greedy matching
'<>|'
'<=|'
'>=|'
'=|'
'<|'
'>)'),
), (
self.Nonterminal.OPERAND, re.compile(
'^' +
                attribute_regex + r'(\.' + attribute_regex + r'|\[[0-9]\])*')
), (
self.Nonterminal.COMMA, re.compile('^,')
), (
            self.Nonterminal.LEFT_PAREN, re.compile(r'^\(')
), (
            self.Nonterminal.RIGHT_PAREN, re.compile(r'^\)')
)]
for nonterminal, pattern in patterns:
match = pattern.match(remaining_expression)
if match:
match_text = match.group()
break
else: # pragma: no cover
raise ValueError("Cannot parse condition starting at: " +
remaining_expression)
value = match_text
node = self.Node(
nonterminal=nonterminal,
kind=self.Kind.LITERAL,
text=match_text,
value=match_text,
children=[])
remaining_expression = remaining_expression[len(match_text):]
return node, remaining_expression
def _parse_paths(self, nodes):
output = deque()
while nodes:
node = nodes.popleft()
if node.nonterminal == self.Nonterminal.OPERAND:
path = node.value.replace('[', '.[').split('.')
children = [
self._parse_path_element(name)
for name in path]
if len(children) == 1:
child = children[0]
if child.nonterminal != self.Nonterminal.IDENTIFIER:
output.append(child)
continue
else:
for child in children:
self._assert(
child.nonterminal == self.Nonterminal.IDENTIFIER,
"Cannot use %s in path" % child.text, [node])
output.append(self.Node(
nonterminal=self.Nonterminal.OPERAND,
kind=self.Kind.PATH,
text=node.text,
value=None,
children=children))
else:
output.append(node)
return output
def _parse_path_element(self, name):
reserved = {
'and': self.Nonterminal.AND,
'or': self.Nonterminal.OR,
'in': self.Nonterminal.IN,
'between': self.Nonterminal.BETWEEN,
'not': self.Nonterminal.NOT,
}
functions = {
'attribute_exists',
'attribute_not_exists',
'attribute_type',
'begins_with',
'contains',
'size',
}
if name.lower() in reserved:
# e.g. AND
nonterminal = reserved[name.lower()]
return self.Node(
nonterminal=nonterminal,
kind=self.Kind.LITERAL,
text=name,
value=name,
children=[])
elif name in functions:
# e.g. attribute_exists
return self.Node(
nonterminal=self.Nonterminal.FUNCTION_NAME,
kind=self.Kind.LITERAL,
text=name,
value=name,
children=[])
elif name.startswith(':'):
# e.g. :value0
return self.Node(
nonterminal=self.Nonterminal.OPERAND,
kind=self.Kind.EXPRESSION_ATTRIBUTE_VALUE,
text=name,
value=self._lookup_expression_attribute_value(name),
children=[])
elif name.startswith('#'):
# e.g. #name0
return self.Node(
nonterminal=self.Nonterminal.IDENTIFIER,
kind=self.Kind.LITERAL,
text=name,
value=self._lookup_expression_attribute_name(name),
children=[])
elif name.startswith('['):
# e.g. [123]
if not name.endswith(']'): # pragma: no cover
raise ValueError("Bad path element %s" % name)
return self.Node(
nonterminal=self.Nonterminal.IDENTIFIER,
kind=self.Kind.LITERAL,
text=name,
value=int(name[1:-1]),
children=[])
else:
# e.g. ItemId
return self.Node(
nonterminal=self.Nonterminal.IDENTIFIER,
kind=self.Kind.LITERAL,
text=name,
value=name,
children=[])
def _lookup_expression_attribute_value(self, name):
return self.expression_attribute_values[name]
def _lookup_expression_attribute_name(self, name):
return self.expression_attribute_names[name]
# NOTE: The following constructions are ordered from high precedence to low precedence
# according to
# https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.OperatorsAndFunctions.html#Expressions.OperatorsAndFunctions.Precedence
#
# = <> < <= > >=
# IN
# BETWEEN
# attribute_exists attribute_not_exists begins_with contains
# Parentheses
# NOT
# AND
# OR
#
# The grammar is taken from
# https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.OperatorsAndFunctions.html#Expressions.OperatorsAndFunctions.Syntax
#
# condition-expression ::=
# operand comparator operand
# operand BETWEEN operand AND operand
# operand IN ( operand (',' operand (, ...) ))
# function
# condition AND condition
# condition OR condition
# NOT condition
# ( condition )
#
# comparator ::=
# =
# <>
# <
# <=
# >
# >=
#
# function ::=
# attribute_exists (path)
# attribute_not_exists (path)
# attribute_type (path, type)
# begins_with (path, substr)
# contains (path, operand)
# size (path)
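    # Walking through a small (hypothetical) expression such as
    #   begins_with(#n, :p) AND Id IN (:a, :b)
    # the passes below first lex the tokens, expand #n/:p/:a/:b, fold
    # begins_with(...) into a FUNCTION node, then fold IN, and finally AND,
    # leaving a single CONDITION node that _make_op_condition turns into
    # roughly OpAnd(FuncBeginsWith(...), FuncIn(...)).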
def _matches(self, nodes, production):
"""Check if the nodes start with the given production.
Parameters
----------
nodes: list of Node
production: list of str
The name of a Nonterminal, or '*' for anything
"""
if len(nodes) < len(production):
return False
for i in range(len(production)):
if production[i] == '*':
continue
expected = getattr(self.Nonterminal, production[i])
if nodes[i].nonterminal != expected:
return False
return True
def _apply_comparator(self, nodes):
"""Apply condition := operand comparator operand."""
output = deque()
while nodes:
if self._matches(nodes, ['*', 'COMPARATOR']):
self._assert(
self._matches(nodes, ['OPERAND', 'COMPARATOR', 'OPERAND']),
"Bad comparison", list(nodes)[:3])
lhs = nodes.popleft()
comparator = nodes.popleft()
rhs = nodes.popleft()
nodes.appendleft(self.Node(
nonterminal=self.Nonterminal.CONDITION,
kind=self.Kind.COMPARISON,
text=" ".join([
lhs.text,
comparator.text,
rhs.text]),
value=None,
children=[lhs, comparator, rhs]))
else:
output.append(nodes.popleft())
return output
def _apply_in(self, nodes):
"""Apply condition := operand IN ( operand , ... )."""
output = deque()
while nodes:
if self._matches(nodes, ['*', 'IN']):
self._assert(
self._matches(nodes, ['OPERAND', 'IN', 'LEFT_PAREN']),
"Bad IN expression", list(nodes)[:3])
lhs = nodes.popleft()
in_node = nodes.popleft()
left_paren = nodes.popleft()
all_children = [lhs, in_node, left_paren]
rhs = []
while True:
if self._matches(nodes, ['OPERAND', 'COMMA']):
operand = nodes.popleft()
separator = nodes.popleft()
all_children += [operand, separator]
rhs.append(operand)
elif self._matches(nodes, ['OPERAND', 'RIGHT_PAREN']):
operand = nodes.popleft()
separator = nodes.popleft()
all_children += [operand, separator]
rhs.append(operand)
break # Close
else:
self._assert(
False,
"Bad IN expression starting at", nodes)
nodes.appendleft(self.Node(
nonterminal=self.Nonterminal.CONDITION,
kind=self.Kind.IN,
text=" ".join([t.text for t in all_children]),
value=None,
children=[lhs] + rhs))
else:
output.append(nodes.popleft())
return output
def _apply_between(self, nodes):
"""Apply condition := operand BETWEEN operand AND operand."""
output = deque()
while nodes:
if self._matches(nodes, ['*', 'BETWEEN']):
self._assert(
self._matches(nodes, ['OPERAND', 'BETWEEN', 'OPERAND',
'AND', 'OPERAND']),
"Bad BETWEEN expression", list(nodes)[:5])
lhs = nodes.popleft()
between_node = nodes.popleft()
low = nodes.popleft()
and_node = nodes.popleft()
high = nodes.popleft()
all_children = [lhs, between_node, low, and_node, high]
nodes.appendleft(self.Node(
nonterminal=self.Nonterminal.CONDITION,
kind=self.Kind.BETWEEN,
text=" ".join([t.text for t in all_children]),
value=None,
children=[lhs, low, high]))
else:
output.append(nodes.popleft())
return output
def _apply_functions(self, nodes):
"""Apply condition := function_name (operand , ...)."""
output = deque()
either_kind = {self.Kind.PATH, self.Kind.EXPRESSION_ATTRIBUTE_VALUE}
expected_argument_kind_map = {
'attribute_exists': [{self.Kind.PATH}],
'attribute_not_exists': [{self.Kind.PATH}],
'attribute_type': [either_kind, {self.Kind.EXPRESSION_ATTRIBUTE_VALUE}],
'begins_with': [either_kind, either_kind],
'contains': [either_kind, either_kind],
'size': [{self.Kind.PATH}],
}
while nodes:
if self._matches(nodes, ['FUNCTION_NAME']):
self._assert(
self._matches(nodes, ['FUNCTION_NAME', 'LEFT_PAREN',
'OPERAND', '*']),
"Bad function expression at", list(nodes)[:4])
function_name = nodes.popleft()
left_paren = nodes.popleft()
all_children = [function_name, left_paren]
arguments = []
while True:
if self._matches(nodes, ['OPERAND', 'COMMA']):
operand = nodes.popleft()
separator = nodes.popleft()
all_children += [operand, separator]
arguments.append(operand)
elif self._matches(nodes, ['OPERAND', 'RIGHT_PAREN']):
operand = nodes.popleft()
separator = nodes.popleft()
all_children += [operand, separator]
arguments.append(operand)
break # Close paren
else:
self._assert(
False,
"Bad function expression", all_children + list(nodes)[:2])
expected_kinds = expected_argument_kind_map[function_name.value]
self._assert(
len(arguments) == len(expected_kinds),
"Wrong number of arguments in", all_children)
for i in range(len(expected_kinds)):
self._assert(
arguments[i].kind in expected_kinds[i],
"Wrong type for argument %d in" % i, all_children)
if function_name.value == 'size':
nonterminal = self.Nonterminal.OPERAND
else:
nonterminal = self.Nonterminal.CONDITION
nodes.appendleft(self.Node(
nonterminal=nonterminal,
kind=self.Kind.FUNCTION,
text=" ".join([t.text for t in all_children]),
value=None,
children=[function_name] + arguments))
else:
output.append(nodes.popleft())
return output
def _apply_parens_and_booleans(self, nodes, left_paren=None):
"""Apply condition := ( condition ) and booleans."""
output = deque()
while nodes:
if self._matches(nodes, ['LEFT_PAREN']):
parsed = self._apply_parens_and_booleans(nodes, left_paren=nodes.popleft())
self._assert(
len(parsed) >= 1,
"Failed to close parentheses at", nodes)
parens = parsed.popleft()
self._assert(
parens.kind == self.Kind.PARENTHESES,
"Failed to close parentheses at", nodes)
output.append(parens)
nodes = parsed
elif self._matches(nodes, ['RIGHT_PAREN']):
self._assert(
left_paren is not None,
"Unmatched ) at", nodes)
close_paren = nodes.popleft()
children = self._apply_booleans(output)
all_children = [left_paren] + list(children) + [close_paren]
return deque([
self.Node(
nonterminal=self.Nonterminal.CONDITION,
kind=self.Kind.PARENTHESES,
text=" ".join([t.text for t in all_children]),
value=None,
children=list(children),
)] + list(nodes))
else:
output.append(nodes.popleft())
self._assert(
left_paren is None,
"Unmatched ( at", list(output))
return self._apply_booleans(output)
def _apply_booleans(self, nodes):
"""Apply and, or, and not constructions."""
nodes = self._apply_not(nodes)
nodes = self._apply_and(nodes)
nodes = self._apply_or(nodes)
# The expression should reduce to a single condition
self._assert(
len(nodes) == 1,
"Unexpected expression at", list(nodes)[1:])
self._assert(
nodes[0].nonterminal == self.Nonterminal.CONDITION,
"Incomplete condition", nodes)
return nodes
def _apply_not(self, nodes):
"""Apply condition := NOT condition."""
output = deque()
while nodes:
if self._matches(nodes, ['NOT']):
self._assert(
self._matches(nodes, ['NOT', 'CONDITION']),
"Bad NOT expression", list(nodes)[:2])
not_node = nodes.popleft()
child = nodes.popleft()
nodes.appendleft(self.Node(
nonterminal=self.Nonterminal.CONDITION,
kind=self.Kind.NOT,
text=" ".join([not_node.text, child.text]),
value=None,
children=[child]))
else:
output.append(nodes.popleft())
return output
def _apply_and(self, nodes):
"""Apply condition := condition AND condition."""
output = deque()
while nodes:
if self._matches(nodes, ['*', 'AND']):
self._assert(
self._matches(nodes, ['CONDITION', 'AND', 'CONDITION']),
"Bad AND expression", list(nodes)[:3])
lhs = nodes.popleft()
and_node = nodes.popleft()
rhs = nodes.popleft()
all_children = [lhs, and_node, rhs]
nodes.appendleft(self.Node(
nonterminal=self.Nonterminal.CONDITION,
kind=self.Kind.AND,
text=" ".join([t.text for t in all_children]),
value=None,
children=[lhs, rhs]))
else:
output.append(nodes.popleft())
return output
def _apply_or(self, nodes):
"""Apply condition := condition OR condition."""
output = deque()
while nodes:
if self._matches(nodes, ['*', 'OR']):
self._assert(
self._matches(nodes, ['CONDITION', 'OR', 'CONDITION']),
"Bad OR expression", list(nodes)[:3])
lhs = nodes.popleft()
or_node = nodes.popleft()
rhs = nodes.popleft()
all_children = [lhs, or_node, rhs]
nodes.appendleft(self.Node(
nonterminal=self.Nonterminal.CONDITION,
kind=self.Kind.OR,
text=" ".join([t.text for t in all_children]),
value=None,
children=[lhs, rhs]))
else:
output.append(nodes.popleft())
return output
def _make_operand(self, node):
if node.kind == self.Kind.PATH:
return AttributePath([child.value for child in node.children])
elif node.kind == self.Kind.EXPRESSION_ATTRIBUTE_VALUE:
return AttributeValue(node.value)
elif node.kind == self.Kind.FUNCTION:
# size()
function_node = node.children[0]
arguments = node.children[1:]
function_name = function_node.value
arguments = [self._make_operand(arg) for arg in arguments]
return FUNC_CLASS[function_name](*arguments)
else: # pragma: no cover
raise ValueError("Unknown operand: %r" % node)
def _make_op_condition(self, node):
if node.kind == self.Kind.OR:
lhs, rhs = node.children
return OpOr(
self._make_op_condition(lhs),
self._make_op_condition(rhs))
elif node.kind == self.Kind.AND:
lhs, rhs = node.children
return OpAnd(
self._make_op_condition(lhs),
self._make_op_condition(rhs))
elif node.kind == self.Kind.NOT:
child, = node.children
return OpNot(self._make_op_condition(child))
elif node.kind == self.Kind.PARENTHESES:
child, = node.children
return self._make_op_condition(child)
elif node.kind == self.Kind.FUNCTION:
function_node = node.children[0]
arguments = node.children[1:]
function_name = function_node.value
arguments = [self._make_operand(arg) for arg in arguments]
return FUNC_CLASS[function_name](*arguments)
elif node.kind == self.Kind.BETWEEN:
query, low, high = node.children
return FuncBetween(
self._make_operand(query),
self._make_operand(low),
self._make_operand(high))
elif node.kind == self.Kind.IN:
query = node.children[0]
possible_values = node.children[1:]
query = self._make_operand(query)
possible_values = [self._make_operand(v) for v in possible_values]
return FuncIn(query, *possible_values)
elif node.kind == self.Kind.COMPARISON:
lhs, comparator, rhs = node.children
return COMPARATOR_CLASS[comparator.value](
self._make_operand(lhs),
self._make_operand(rhs))
else: # pragma: no cover
raise ValueError("Unknown expression node kind %r" % node.kind)
def _print_debug(self, nodes): # pragma: no cover
print('ROOT')
for node in nodes:
self._print_node_recursive(node, depth=1)
def _print_node_recursive(self, node, depth=0): # pragma: no cover
if len(node.children) > 0:
print(' ' * depth, node.nonterminal, node.kind)
for child in node.children:
self._print_node_recursive(child, depth=depth + 1)
else:
print(' ' * depth, node.nonterminal, node.kind, node.value)
def _assert(self, condition, message, nodes):
if not condition:
raise ValueError(message + " " + " ".join([t.text for t in nodes]))
class Operand(object):
def expr(self, item):
raise NotImplementedError
def get_type(self, item):
raise NotImplementedError
class AttributePath(Operand):
def __init__(self, path):
"""Initialize the AttributePath.
Parameters
----------
path: list of int/str
"""
assert len(path) >= 1
self.path = path
def _get_attr(self, item):
if item is None:
return None
base = self.path[0]
if base not in item.attrs:
return None
attr = item.attrs[base]
for name in self.path[1:]:
attr = attr.child_attr(name)
if attr is None:
return None
return attr
def expr(self, item):
attr = self._get_attr(item)
if attr is None:
return None
else:
return attr.cast_value
def get_type(self, item):
attr = self._get_attr(item)
if attr is None:
return None
else:
return attr.type
def __repr__(self):
return ".".join(self.path)
class AttributeValue(Operand):
def __init__(self, value):
"""Initialize the AttributePath.
Parameters
----------
value: dict
e.g. {'N': '1.234'}
"""
self.type = list(value.keys())[0]
self.value = value[self.type]
def expr(self, item):
# TODO: Reuse DynamoType code
if self.type == 'N':
try:
return int(self.value)
except ValueError:
return float(self.value)
elif self.type in ['SS', 'NS', 'BS']:
sub_type = self.type[0]
return set([AttributeValue({sub_type: v}).expr(item)
for v in self.value])
elif self.type == 'L':
return [AttributeValue(v).expr(item) for v in self.value]
elif self.type == 'M':
return dict([
(k, AttributeValue(v).expr(item))
for k, v in self.value.items()])
else:
return self.value
def get_type(self, item):
return self.type
def __repr__(self):
return repr(self.value)
class OpDefault(Op):
OP = 'NONE'
def expr(self, item):
"""If no condition is specified, always True."""
return True
class OpNot(Op):
OP = 'NOT'
def __init__(self, lhs):
super(OpNot, self).__init__(lhs, None)
def expr(self, item):
lhs = self.lhs.expr(item)
return not lhs
def __str__(self):
return '({0} {1})'.format(self.OP, self.lhs)
class OpAnd(Op):
OP = 'AND'
def expr(self, item):
lhs = self.lhs.expr(item)
rhs = self.rhs.expr(item)
return lhs and rhs
class OpLessThan(Op):
OP = '<'
def expr(self, item):
lhs = self.lhs.expr(item)
rhs = self.rhs.expr(item)
return lhs < rhs
class OpGreaterThan(Op):
OP = '>'
def expr(self, item):
lhs = self.lhs.expr(item)
rhs = self.rhs.expr(item)
return lhs > rhs
class OpEqual(Op):
OP = '='
def expr(self, item):
lhs = self.lhs.expr(item)
rhs = self.rhs.expr(item)
return lhs == rhs
class OpNotEqual(Op):
OP = '<>'
def expr(self, item):
lhs = self.lhs.expr(item)
rhs = self.rhs.expr(item)
return lhs != rhs
class OpLessThanOrEqual(Op):
OP = '<='
def expr(self, item):
lhs = self.lhs.expr(item)
rhs = self.rhs.expr(item)
return lhs <= rhs
class OpGreaterThanOrEqual(Op):
OP = '>='
def expr(self, item):
lhs = self.lhs.expr(item)
rhs = self.rhs.expr(item)
return lhs >= rhs
class OpOr(Op):
OP = 'OR'
def expr(self, item):
lhs = self.lhs.expr(item)
rhs = self.rhs.expr(item)
return lhs or rhs
class Func(object):
"""
Base class for a FilterExpression function
"""
FUNC = 'Unknown'
def __init__(self, *arguments):
self.arguments = arguments
def expr(self, item):
raise NotImplementedError
def __repr__(self):
return '{0}({1})'.format(
self.FUNC,
" ".join([repr(arg) for arg in self.arguments]))
class FuncAttrExists(Func):
FUNC = 'attribute_exists'
def __init__(self, attribute):
self.attr = attribute
super(FuncAttrExists, self).__init__(attribute)
def expr(self, item):
return self.attr.get_type(item) is not None
def FuncAttrNotExists(attribute):
return OpNot(FuncAttrExists(attribute))
class FuncAttrType(Func):
FUNC = 'attribute_type'
def __init__(self, attribute, _type):
self.attr = attribute
self.type = _type
super(FuncAttrType, self).__init__(attribute, _type)
def expr(self, item):
return self.attr.get_type(item) == self.type.expr(item)
class FuncBeginsWith(Func):
FUNC = 'begins_with'
def __init__(self, attribute, substr):
self.attr = attribute
self.substr = substr
super(FuncBeginsWith, self).__init__(attribute, substr)
def expr(self, item):
if self.attr.get_type(item) != 'S':
return False
if self.substr.get_type(item) != 'S':
return False
return self.attr.expr(item).startswith(self.substr.expr(item))
class FuncContains(Func):
FUNC = 'contains'
def __init__(self, attribute, operand):
self.attr = attribute
self.operand = operand
super(FuncContains, self).__init__(attribute, operand)
def expr(self, item):
if self.attr.get_type(item) in ('S', 'SS', 'NS', 'BS', 'L'):
try:
return self.operand.expr(item) in self.attr.expr(item)
except TypeError:
return False
return False
def FuncNotContains(attribute, operand):
return OpNot(FuncContains(attribute, operand))
class FuncSize(Func):
FUNC = 'size'
def __init__(self, attribute):
self.attr = attribute
super(FuncSize, self).__init__(attribute)
def expr(self, item):
if self.attr.get_type(item) is None:
raise ValueError('Invalid attribute name {0}'.format(self.attr))
if self.attr.get_type(item) in ('S', 'SS', 'NS', 'B', 'BS', 'L', 'M'):
return len(self.attr.expr(item))
raise ValueError('Invalid filter expression')
class FuncBetween(Func):
FUNC = 'BETWEEN'
def __init__(self, attribute, start, end):
self.attr = attribute
self.start = start
self.end = end
super(FuncBetween, self).__init__(attribute, start, end)
def expr(self, item):
return self.start.expr(item) <= self.attr.expr(item) <= self.end.expr(item)
class FuncIn(Func):
FUNC = 'IN'
def __init__(self, attribute, *possible_values):
self.attr = attribute
self.possible_values = possible_values
super(FuncIn, self).__init__(attribute, *possible_values)
def expr(self, item):
for possible_value in self.possible_values:
if self.attr.expr(item) == possible_value.expr(item):
return True
return False
COMPARATOR_CLASS = {
'<': OpLessThan,
'>': OpGreaterThan,
'<=': OpLessThanOrEqual,
'>=': OpGreaterThanOrEqual,
'=': OpEqual,
'<>': OpNotEqual
}
FUNC_CLASS = {
'attribute_exists': FuncAttrExists,
'attribute_not_exists': FuncAttrNotExists,
'attribute_type': FuncAttrType,
'begins_with': FuncBeginsWith,
'contains': FuncContains,
'size': FuncSize,
'between': FuncBetween
}
|
the-stack_0_8808 | import setuptools
import sys
import pathlib
if sys.version_info.major < 3:
print("\nPython 2 is not supported! \nPlease upgrade to Python 3.\n")
print(
"Installation of BookCut stopped, please try again with\n"
"a newer version of Python!"
)
sys.exit(1)
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
setuptools.setup(
name="BookCut",
python_requires=">3.5.2",
version="1.3.6",
author="Costis94",
author_email="[email protected]",
description="Command Line Interface app to download ebooks",
long_description_content_type="text/markdown",
long_description=README,
url="https://github.com/costis94/bookcut",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"pandas",
"click>=7.1.2",
"requests",
"beautifulsoup4",
"pyfiglet",
"tqdm",
"mechanize",
],
extras_require={
"dev": [
"pytest",
"pytest-cov",
"pre-commit",
"black",
]
},
include_package_data=True,
entry_points="""
[console_scripts]
bookcut=bookcut.bookcut:entry
""",
)
|
the-stack_0_8810 | from flask import g
from models import Duck, Pink
from core.base import single_query
from core.singleton import redis
def pink_serializer(pink=None, pinks=None):
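    """Serialize one Pink (``pink``) or several (``pinks``) into JSON-ready
    dicts, attaching each user's last access time from the Redis
    ``last_access`` hash; the requesting user's own record gets a partially
    masked email."""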
if not pinks:
result = pink.__json__()
        result['last_access'] = redis.hget('last_access', pink.id)
if pink.id == g.pink_id:
user, host = pink.email.split('@')
result['email'] = f'{user[0:3]}******@{host}'
else:
result = list()
queue = list()
for p in pinks:
result.append(p.__json__())
queue.append(p.id)
last_access = redis.hmget('last_access', *queue)
for info, access_time in zip(result, last_access):
info['last_access'] = access_time
return result
class PinkQuery():
@staticmethod
def single(id_):
pink = single_query(model=Pink, id_or_obj=id_, condiction=lambda obj: obj.id == g.pink_id)
return pink_serializer(pink)
@staticmethod
def search(deps, name, qq):
        query = Pink.query
        if deps:
            # filter() returns a new query object, so keep the returned query
            query = query.filter(Pink.deps.in_(deps))
        else:
            if name:
                query = query.filter(Pink.name.ilike(f'%{name}%'))
            if qq:
                query = query.filter(Pink.qq.like(f'%{qq}%'))
pinks = query.all()
return pink_serializer(pinks=pinks)
class DuckQuery():
@staticmethod
def ducks(pink_id, node, nodes, allow):
query = Duck.query.filter_by(pink_id=pink_id)
        if node:
            query = query.filter(Duck.node.ilike(f'%{node}%'))
        else:
            if nodes:
                query = query.filter(Duck.node.in_(nodes))
            if allow is not None:
                query = query.filter_by(allow=allow)
return query.all()
|
the-stack_0_8811 | # -*- coding: utf-8 -*-
import pytest
from cmarshmallow import fields
from cmarshmallow.marshalling import Marshaller, Unmarshaller, missing
from cmarshmallow.exceptions import ValidationError
from tests.base import User
def test_missing_is_falsy():
assert bool(missing) is False
class TestMarshaller:
@pytest.fixture()
def marshal(self):
return Marshaller()
def test_prefix(self):
u = User("Foo", email="[email protected]")
marshal = Marshaller(prefix='usr_')
result = marshal(u, {"email": fields.Email(), 'name': fields.String()})
assert result['usr_name'] == u.name
assert result['usr_email'] == u.email
def test_marshalling_generator(self, marshal):
gen = (u for u in [User("Foo"), User("Bar")])
res = marshal(gen, {"name": fields.String()}, many=True)
assert len(res) == 2
def test_default_to_missing(self, marshal):
u = {'name': 'Foo'}
res = marshal(u, {'name': fields.String(),
'email': fields.Email(default=missing)})
assert res['name'] == u['name']
assert 'email' not in res
def test_serialize_fields_with_load_only_param(self, marshal):
u = User('Foo', email='[email protected]')
fields_dict = {
'name': fields.String(),
'email': fields.Email(load_only=True),
}
result = marshal(u, fields_dict)
assert result['name'] == 'Foo'
assert 'email' not in result
# Regression test for https://github.com/marshmallow-code/marshmallow/issues/538
def test_missing_data_are_skipped(self, marshal):
assert marshal({}, {'foo': fields.Field()}) == {}
assert marshal({}, {'foo': fields.Str()}) == {}
assert marshal({}, {'foo': fields.Int()}) == {}
assert marshal({}, {'foo': fields.Int(as_string=True)}) == {}
assert marshal({}, {'foo': fields.Decimal(as_string=True)}) == {}
def test_serialize_with_load_only_doesnt_validate(self, marshal):
fields_dict = {
'email': fields.Email(load_only=True)
}
marshal({'email': 'invalid'}, fields_dict)
assert 'email' not in marshal.errors
def test_serialize_fields_with_dump_to_param(self, marshal):
data = {
'name': 'Mike',
'email': '[email protected]',
}
fields_dict = {
'name': fields.String(dump_to='NaMe'),
'email': fields.Email(attribute='email', dump_to='EmAiL'),
}
result = marshal.serialize(data, fields_dict)
assert result['NaMe'] == 'Mike'
assert result['EmAiL'] == '[email protected]'
def test_serialize_fields_with_dump_to_and_prefix_params(self):
u = User("Foo", email="[email protected]")
marshal = Marshaller(prefix='usr_')
result = marshal(u, {"email": fields.Email(dump_to='EmAiL'),
'name': fields.String(dump_to='NaMe')})
assert result['usr_NaMe'] == u.name
assert result['usr_EmAiL'] == u.email
def test_stores_indices_of_errors_when_many_equals_true(self, marshal):
users = [
{'email': '[email protected]'},
{'email': 'foobar'},
{'email': 'invalid'},
]
try:
marshal(users, {'email': fields.Email()}, many=True)
except ValidationError:
pass
# 2nd and 3rd elements have an error
assert 1 in marshal.errors
assert 2 in marshal.errors
assert 'email' in marshal.errors[1]
assert 'email' in marshal.errors[2]
class TestUnmarshaller:
@pytest.fixture
def unmarshal(self):
return Unmarshaller()
def test_extra_data_is_ignored(self, unmarshal):
fields_ = {'name': fields.Str()}
ret = unmarshal({'extra': 42, 'name': 'Steve'}, fields_)
assert 'extra' not in ret
# def test_strict_mode_many(self, unmarshal):
# users = [
# {'email': 'foobar'},
# {'email': '[email protected]'}
# ]
# with pytest.raises(ValidationError) as excinfo:
# unmarshal(users, {'email': fields.Email()}, strict=True, many=True)
# assert 'Not a valid email address.' in str(excinfo)
def test_stores_errors(self, unmarshal):
data = {'email': 'invalid-email'}
try:
unmarshal(data, {"email": fields.Email()})
except ValidationError:
pass
assert "email" in unmarshal.errors
def test_stores_indices_of_errors_when_many_equals_true(self, unmarshal):
users = [
{'email': '[email protected]'},
{'email': 'foobar'},
{'email': 'invalid'},
]
try:
unmarshal(users, {'email': fields.Email()}, many=True)
except ValidationError:
pass
# 2nd and 3rd elements have an error
assert 1 in unmarshal.errors
assert 2 in unmarshal.errors
assert 'email' in unmarshal.errors[1]
assert 'email' in unmarshal.errors[2]
def test_deserialize(self, unmarshal):
user_data = {
'age': '12'
}
result = unmarshal.deserialize(user_data, {'age': fields.Integer()})
assert result['age'] == 12
def test_extra_fields(self, unmarshal):
data = {'name': 'Mick'}
fields_dict = {'name': fields.String(), 'age': fields.Integer()}
# data doesn't have to have all the fields in the schema
result = unmarshal(data, fields_dict)
assert result['name'] == data['name']
assert 'age' not in result
def test_deserialize_many(self, unmarshal):
users_data = [
{'name': 'Mick', 'age': '71'},
{'name': 'Keith', 'age': '70'}
]
fields_dict = {
'name': fields.String(),
'age': fields.Integer(),
}
result = unmarshal.deserialize(users_data, fields_dict, many=True)
assert isinstance(result, list)
user = result[0]
assert user['age'] == 71
# def test_deserialize_strict_raises_error(self, unmarshal):
# with pytest.raises(ValidationError):
# unmarshal(
# {'email': 'invalid', 'name': 'Mick'},
# {'email': fields.Email(), 'name': fields.String()},
# strict=True
# )
def test_deserialize_stores_errors(self, unmarshal):
user_data = {
'email': 'invalid',
'age': 'nan',
'name': 'Valid Name',
}
fields_dict = {
'email': fields.Email(),
'age': fields.Integer(),
'name': fields.String(),
}
try:
unmarshal(user_data, fields_dict)
except ValidationError:
pass
errors = unmarshal.errors
assert 'email' in errors
assert 'age' in errors
assert 'name' not in errors
def test_deserialize_fields_with_attribute_param(self, unmarshal):
data = {
'username': '[email protected]',
'name': 'Mick'
}
fields_dict = {
'username': fields.Email(attribute='email'),
'name': fields.String(attribute='firstname'),
}
result = unmarshal.deserialize(data, fields_dict)
assert result['email'] == '[email protected]'
assert result['firstname'] == 'Mick'
def test_deserialize_fields_with_load_from_param(self, unmarshal):
data = {
'Name': 'Mick',
'UserName': '[email protected]',
'years': '42'
}
fields_dict = {
'name': fields.String(load_from='Name'),
'username': fields.Email(attribute='email', load_from='UserName'),
'years': fields.Integer(attribute='age', load_from='Years')
}
result = unmarshal.deserialize(data, fields_dict)
assert result['name'] == 'Mick'
assert result['email'] == '[email protected]'
assert result['age'] == 42
def test_deserialize_fields_with_dump_only_param(self, unmarshal):
data = {
'name': 'Mick',
'years': '42',
}
fields_dict = {
'name': fields.String(),
'years': fields.Integer(dump_only=True),
'always_invalid': fields.Field(validate=lambda f: False, dump_only=True)
}
result = unmarshal.deserialize(data, fields_dict)
assert result['name'] == 'Mick'
assert 'years' not in result
assert 'always_invalid' not in unmarshal.errors
|
the-stack_0_8812 | __version__ = '1.0'
__author__ = 'Zachary Nowak'
"""STANDARD LIBRARY IMPORTS"""
import glob
import os
import json
os.chdir("/Users/zacan/OneDrive/Documents/GitHub/Keyboard-Biometric-Testing/Project_Tuples/library")
listOfTxtFiles = []
for file in glob.glob("*.txt"):
listOfTxtFiles.append(file)
print(listOfTxtFiles)
for file in listOfTxtFiles:
    # Use a context manager so each file is closed after reading, and avoid
    # shadowing the built-in dict().
    with open(file, 'r') as handle:
        data = json.load(handle)
    print(data) |
the-stack_0_8813 | import json
import logging
from datetime import timedelta
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models import F, Q, Count
from itertools import chain
from tornado import ioloop, gen
from tornado.websocket import WebSocketHandler, WebSocketClosedError
from chat.models import User, Message, UserJoinedInfo, Room, RoomUsers, UserProfile
from chat.py2_3 import str_type
from chat.tornado.anti_spam import AntiSpam
from chat.tornado.constants import VarNames, HandlerNames, Actions, RedisPrefix
from chat.tornado.message_creator import MessagesCreator
from chat.tornado.message_handler import MessagesHandler, WebRtcMessageHandler
from chat.utils import execute_query, get_message_images_videos, get_history_message_query, create_id, \
get_or_create_ip_model
parent_logger = logging.getLogger(__name__)
class Error401(Exception):
pass
class TornadoHandler(WebSocketHandler, WebRtcMessageHandler):
def __init__(self, *args, **kwargs):
super(TornadoHandler, self).__init__(*args, **kwargs)
self.__connected__ = False
self.restored_connection = False
self.anti_spam = AntiSpam()
@property
def connected(self):
return self.__connected__
@connected.setter
def connected(self, value):
self.__connected__ = value
def data_received(self, chunk):
pass
def on_message(self, json_message):
message = None
try:
if not self.connected:
raise ValidationError('Skipping message %s, as websocket is not initialized yet' % json_message)
if not json_message:
raise Exception('Skipping null message')
# self.anti_spam.check_spam(json_message)
self.logger.debug('<< %.1000s', json_message)
message = json.loads(json_message)
if message[VarNames.EVENT] not in self.process_ws_message:
raise Exception("event {} is unknown".format(message[VarNames.EVENT]))
channel = message.get(VarNames.ROOM_ID)
if channel and channel not in self.channels:
raise ValidationError('Access denied for channel {}. Allowed channels: {}'.format(channel, self.channels))
self.process_ws_message[message[VarNames.EVENT]](message)
except ValidationError as e:
error_message = self.default(str(e.message), Actions.GROWL_MESSAGE, HandlerNames.WS)
if message:
error_message[VarNames.JS_MESSAGE_ID] = message.get(VarNames.JS_MESSAGE_ID, None)
self.ws_write(error_message)
def on_close(self):
if self.async_redis.subscribed:
self.logger.info("Close event, unsubscribing from %s", self.channels)
self.async_redis.unsubscribe(self.channels)
else:
self.logger.info("Close event, not subscribed, channels: %s", self.channels)
self.async_redis_publisher.srem(RedisPrefix.ONLINE_VAR, self.id)
is_online, online = self.get_online_and_status_from_redis()
if self.connected:
if not is_online:
message = self.room_online_logout(online)
self.publish(message, settings.ALL_ROOM_ID)
res = execute_query(settings.UPDATE_LAST_READ_MESSAGE, [self.user_id, ])
self.logger.info("Updated %s last read message", res)
self.disconnect()
def disconnect(self, tries=0):
"""
Closes a connection if it's not in proggress, otherwice timeouts closing
https://github.com/evilkost/brukva/issues/25#issuecomment-9468227
"""
self.connected = False
self.closed_channels = self.channels
self.channels = []
		if self.async_redis.connection.in_progress and tries < 1000:  # failsafe against an eternal loop
self.logger.debug('Closing a connection timeouts')
ioloop.IOLoop.instance().add_timeout(timedelta(0.00001), self.disconnect, tries+1)
else:
self.logger.info("Close connection result: %s")
self.async_redis.disconnect()
def generate_self_id(self):
"""
When user opens new tab in browser wsHandler.wsConnectionId stores Id of current ws
So if ws loses a connection it still can reconnect with same id,
and TornadoHandler can restore webrtc_connections to previous state
"""
conn_arg = self.get_argument('id', None)
self.id, random = create_id(self.user_id, conn_arg)
self.restored_connection = random == conn_arg
self.restored_connection = False
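		# NOTE: the flag computed above is immediately forced to False, so the
		# "restored connection" path is effectively disabled here.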
self.save_ip()
def open(self):
session_key = self.get_argument('sessionId', None)
user_id = self.sync_redis.hget('sessions', session_key)
if user_id is None:
self.logger.warning('!! Session key %s has been rejected' % session_key)
self.close(403, "Session key %s has been rejected" % session_key)
return
self.user_id = int(user_id)
self.ip = self.get_client_ip()
user_db = UserProfile.objects.get(id=self.user_id)
self.generate_self_id()
self._logger = logging.LoggerAdapter(parent_logger, {
'id': self.id,
'ip': self.ip
})
self.logger.debug("!! Incoming connection, session %s, thread hash %s", session_key, self.id)
self.async_redis.connect()
self.async_redis_publisher.sadd(RedisPrefix.ONLINE_VAR, self.id)
# since we add user to online first, latest trigger will always show correct online
was_online, online = self.get_online_and_status_from_redis()
user_rooms_query = Room.objects.filter(users__id=self.user_id, disabled=False) \
.values('id', 'name', 'roomusers__notifications', 'roomusers__volume')
room_users = [{
VarNames.ROOM_ID: room['id'],
VarNames.ROOM_NAME: room['name'],
VarNames.NOTIFICATIONS: room['roomusers__notifications'],
VarNames.VOLUME: room['roomusers__volume'],
VarNames.ROOM_USERS: []
} for room in user_rooms_query]
user_rooms_dict = {room[VarNames.ROOM_ID]: room for room in room_users}
room_ids = [room_id[VarNames.ROOM_ID] for room_id in room_users]
rooms_users = RoomUsers.objects.filter(room_id__in=room_ids).values('user_id', 'room_id')
for ru in rooms_users:
user_rooms_dict[ru['room_id']][VarNames.ROOM_USERS].append(ru['user_id'])
# get all missed messages
self.channels = room_ids # py2 doesn't support clear()
self.channels.append(self.channel)
self.channels.append(self.id)
self.listen(self.channels)
off_messages, history = self.get_offline_messages(room_users, was_online, self.get_argument('history', False))
for room in room_users:
room_id = room[VarNames.ROOM_ID]
h = history.get(room_id)
o = off_messages.get(room_id)
if h:
room[VarNames.LOAD_MESSAGES_HISTORY] = h
if o:
room[VarNames.LOAD_MESSAGES_OFFLINE] = o
if settings.SHOW_COUNTRY_CODE:
fetched_users = User.objects.annotate(user_c=Count('id')).values('id', 'username', 'sex', 'userjoinedinfo__ip__country_code', 'userjoinedinfo__ip__country', 'userjoinedinfo__ip__region', 'userjoinedinfo__ip__city')
user_dict = [RedisPrefix.set_js_user_structure_flag(
user['id'],
user['username'],
user['sex'],
user['userjoinedinfo__ip__country_code'],
user['userjoinedinfo__ip__country'],
user['userjoinedinfo__ip__region'],
user['userjoinedinfo__ip__city']
) for user in fetched_users]
else:
fetched_users = User.objects.values('id', 'username', 'sex')
user_dict = [RedisPrefix.set_js_user_structure(
user['id'],
user['username'],
user['sex']
) for user in fetched_users]
if self.user_id not in online:
online.append(self.user_id)
self.ws_write(self.set_room(room_users, user_dict, online, user_db))
if not was_online: # if a new tab has been opened
online_user_names_mes = self.room_online_login(online, user_db.username, user_db.sex_str)
self.logger.info('!! First tab, sending refresh online for all')
self.publish(online_user_names_mes, settings.ALL_ROOM_ID)
self.logger.info("!! User %s subscribes for %s", self.user_id, self.channels)
self.connected = True
def get_offline_messages(self, user_rooms, was_online, with_history):
q_objects = get_history_message_query(self.get_argument('messages', None), user_rooms, with_history)
if was_online:
off_messages = []
else:
off_messages = Message.objects.filter(
id__gt=F('room__roomusers__last_read_message_id'),
room__roomusers__user_id=self.user_id
)
off = {}
history = {}
if len(q_objects.children) > 0:
history_messages = Message.objects.filter(q_objects)
all = list(chain(off_messages, history_messages))
self.logger.info("Offline messages IDs: %s, history messages: %s", [m.id for m in off_messages], [m.id for m in history_messages])
else:
history_messages = []
all = off_messages
if self.restored_connection:
off_messages = all
history_messages = []
imv = get_message_images_videos(all)
self.set_video_images_messages(imv, off_messages, off)
self.set_video_images_messages(imv, history_messages, history)
return off, history
def set_video_images_messages(self, imv, inm, outm):
for message in inm:
files = MessagesCreator.prepare_img_video(imv, message.id)
prep_m = self.create_message(message, files)
outm.setdefault(message.room_id, []).append(prep_m)
def check_origin(self, origin):
"""
        check whether the domain set by the browser matches the origin
"""
return True # we don't use cookies
@gen.coroutine
def save_ip(self):
"""
This code is not used anymore
"""
if not UserJoinedInfo.objects.filter(
Q(ip__ip=self.ip) & Q(user_id=self.user_id)).exists():
ip = yield from get_or_create_ip_model(self.ip, self.logger)
UserJoinedInfo.objects.create(ip=ip, user_id=self.user_id)
def ws_write(self, message):
"""
        Tries to send a message; exceptions are logged instead of propagated
        :type self: MessagesHandler
        :type message: object
"""
# self.logger.debug('<< THREAD %s >>', os.getppid())
try:
if isinstance(message, dict):
message = json.dumps(message)
if not isinstance(message, str_type):
raise ValueError('Wrong message type : %s' % str(message))
self.logger.debug(">> %.1000s", message)
self.write_message(message)
except WebSocketClosedError as e:
self.logger.warning("%s. Can't send message << %s >> ", e, str(message))
def get_client_ip(self):
return self.request.headers.get("X-Real-IP") or self.request.remote_ip
|
the-stack_0_8817 | from dask.delayed import delayed
from .data import get_closes, get_volumes, get_yahoo_data
from .signals import get_signals
def get_full_pipeline(tickers, start_date, end_date):
"""Return the full simulation pipeline"""
yahoo_data = delayed(get_yahoo_data)(
tickers, start_date, end_date, dask_key_name="yahoo_data"
)
volumes = delayed(get_volumes)(yahoo_data, dask_key_name="volumes")
closes = delayed(get_closes)(yahoo_data, dask_key_name="closes")
signals = delayed(get_signals)(closes, volumes, dask_key_name="signals")
# The final node
final = signals
# Return a dict with each pipeline step
return {name: final[name] for name in final.dask.keys()}
|
the-stack_0_8818 | import os
import sys
import urllib.request
import json
from pathlib import Path
def main():
file_path = 'logs/release_stats.json'
repository_name = os.environ.get("REPOSITORY_NAME")
request_url = "https://api.github.com/repos/{0}/releases".format(repository_name)
print('request_url = ', request_url)
try:
req = urllib.request.urlopen(request_url)
except IOError:
print('Release not found. Aborted stats generation.')
return
git_d = json.loads(req.read())
repo_d = []
repo_ver = {}
if os.path.exists(file_path):
with open(file_path, 'r') as f:
repo_d = json.loads(f.read())
repo_ver = {d: i for i, d in enumerate(sum([list(d.keys()) for d in repo_d], []))}
output = []
for gd in git_d:
ver = gd.get('tag_name')
assets = gd.get('assets')
data_type = ['name', 'download_count', 'created_at', 'updated_at']
new_data = {}
new_data[ver] = [[] for i in range(len(assets))]
for idx, asset in enumerate(assets):
new_data[ver][idx] = {'name' : assets[idx].get('name'), 'download_count' : assets[idx].get('download_count'), 'created_at' : assets[idx].get('created_at'), 'updated_at' : assets[idx].get('updated_at')}
names = [nd.get('name') for nd in new_data[ver]]
ver_idx = repo_ver.get(ver)
if len(repo_d) > 0 and ver_idx != None:
for nd in new_data[ver]:
for idx, rd in enumerate(repo_d[ver_idx][ver]):
if rd.get('name') in nd.values():
                        if nd.get('created_at') != rd.get('created_at'): # after the first run: accumulate counts
repo_d[ver_idx][ver][idx]['download_count'] = rd.get('download_count') + nd.get('download_count')
else:
repo_d[ver_idx][ver][idx]['download_count'] = nd.get('download_count')
repo_d[ver_idx][ver][idx]['updated_at'] = nd.get('updated_at')
break
else:
repo_d[ver_idx][ver].append(nd)
repo_d[ver_idx][ver] = [rd for rd in repo_d[ver_idx][ver] if any(rd.get('name') == name for name in names)]
output.append(repo_d[ver_idx])
else:
output.append(new_data)
os.makedirs("logs", exist_ok=True)
with open(file_path, 'w+') as f:
f.write(json.dumps(output, indent=4))
print('update {0}'.format(file_path))
if __name__ == '__main__':
main()
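# Usage note (not part of the original script): REPOSITORY_NAME must hold a
# GitHub "owner/repo" slug before the script runs, e.g.
#   REPOSITORY_NAME="owner/repo" python release_stats.py   # filename illustrative
# The accumulated per-asset download counts are written to logs/release_stats.json.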
|
the-stack_0_8820 | """
*******
GraphML
*******
Read and write graphs in GraphML format.
This implementation does not support mixed graphs (directed and undirected
edges together), hyperedges, nested graphs, or ports.
"GraphML is a comprehensive and easy-to-use file format for graphs. It
consists of a language core to describe the structural properties of a
graph and a flexible extension mechanism to add application-specific
data. Its main features include support of
* directed, undirected, and mixed graphs,
* hypergraphs,
* hierarchical graphs,
* graphical representations,
* references to external data,
* application-specific attribute data, and
* light-weight parsers.
Unlike many other file formats for graphs, GraphML does not use a
custom syntax. Instead, it is based on XML and hence ideally suited as
a common denominator for all kinds of services generating, archiving,
or processing graphs."
http://graphml.graphdrawing.org/
Format
------
GraphML is an XML format. See
http://graphml.graphdrawing.org/specification.html for the specification and
http://graphml.graphdrawing.org/primer/graphml-primer.html
for examples.
"""
import warnings
from collections import defaultdict
import networkx as nx
from networkx.utils import open_file
__all__ = [
"write_graphml",
"read_graphml",
"generate_graphml",
"write_graphml_xml",
"write_graphml_lxml",
"parse_graphml",
"GraphMLWriter",
"GraphMLReader",
]
@open_file(1, mode="wb")
def write_graphml_xml(
G,
path,
encoding="utf-8",
prettyprint=True,
infer_numeric_types=False,
named_key_ids=False,
):
"""Write G in GraphML XML format to path
Parameters
----------
G : graph
A networkx graph
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
infer_numeric_types : boolean
Determine if numeric types should be generalized.
For example, if edges have both int and float 'weight' attributes,
we infer in GraphML that both are floats.
named_key_ids : bool (optional)
If True use attr.name as value for key elements' id attribute.
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_graphml(G, "test.graphml")
Notes
-----
This implementation does not support mixed graphs (directed
    and undirected edges together), hyperedges, nested graphs, or ports.
"""
writer = GraphMLWriter(
encoding=encoding,
prettyprint=prettyprint,
infer_numeric_types=infer_numeric_types,
named_key_ids=named_key_ids,
)
writer.add_graph_element(G)
writer.dump(path)
@open_file(1, mode="wb")
def write_graphml_lxml(
G,
path,
encoding="utf-8",
prettyprint=True,
infer_numeric_types=False,
named_key_ids=False,
):
"""Write G in GraphML XML format to path
This function uses the LXML framework and should be faster than
the version using the xml library.
Parameters
----------
G : graph
A networkx graph
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
infer_numeric_types : boolean
Determine if numeric types should be generalized.
For example, if edges have both int and float 'weight' attributes,
we infer in GraphML that both are floats.
named_key_ids : bool (optional)
If True use attr.name as value for key elements' id attribute.
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_graphml_lxml(G, "fourpath.graphml")
Notes
-----
This implementation does not support mixed graphs (directed
    and undirected edges together), hyperedges, nested graphs, or ports.
"""
try:
import lxml.etree as lxmletree
except ImportError:
return write_graphml_xml(
G, path, encoding, prettyprint, infer_numeric_types, named_key_ids
)
writer = GraphMLWriterLxml(
path,
graph=G,
encoding=encoding,
prettyprint=prettyprint,
infer_numeric_types=infer_numeric_types,
named_key_ids=named_key_ids,
)
writer.dump()
def generate_graphml(G, encoding="utf-8", prettyprint=True, named_key_ids=False):
"""Generate GraphML lines for G
Parameters
----------
G : graph
A networkx graph
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
named_key_ids : bool (optional)
If True use attr.name as value for key elements' id attribute.
Examples
--------
>>> G = nx.path_graph(4)
>>> linefeed = chr(10) # linefeed = \n
>>> s = linefeed.join(nx.generate_graphml(G))
>>> for line in nx.generate_graphml(G): # doctest: +SKIP
... print(line)
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
    edges together), hyperedges, nested graphs, or ports.
"""
writer = GraphMLWriter(
encoding=encoding, prettyprint=prettyprint, named_key_ids=named_key_ids
)
writer.add_graph_element(G)
yield from str(writer).splitlines()
@open_file(0, mode="rb")
def read_graphml(path, node_type=str, edge_key_type=int, force_multigraph=False):
"""Read graph in GraphML format from path.
Parameters
----------
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
node_type: Python type (default: str)
Convert node ids to this type
edge_key_type: Python type (default: int)
Convert graphml edge ids to this type. Multigraphs use id as edge key.
Non-multigraphs add to edge attribute dict with name "id".
force_multigraph : bool (default: False)
If True, return a multigraph with edge keys. If False (the default)
return a multigraph when multiedges are in the graph.
Returns
-------
graph: NetworkX graph
If parallel edges are present or `force_multigraph=True` then
a MultiGraph or MultiDiGraph is returned. Otherwise a Graph/DiGraph.
The returned graph is directed if the file indicates it should be.
Notes
-----
Default node and edge attributes are not propagated to each node and edge.
They can be obtained from `G.graph` and applied to node and edge attributes
if desired using something like this:
>>> default_color = G.graph["node_default"]["color"] # doctest: +SKIP
>>> for node, data in G.nodes(data=True): # doctest: +SKIP
... if "color" not in data:
... data["color"] = default_color
>>> default_color = G.graph["edge_default"]["color"] # doctest: +SKIP
>>> for u, v, data in G.edges(data=True): # doctest: +SKIP
... if "color" not in data:
... data["color"] = default_color
    This implementation does not support mixed graphs (directed and undirected
edges together), hypergraphs, nested graphs, or ports.
For multigraphs the GraphML edge "id" will be used as the edge
    key. If not specified then the "key" attribute will be used. If
there is no "key" attribute a default NetworkX multigraph edge key
will be provided.
    Files with the yEd "yfiles" extension can be read, but the graphics
information is discarded.
yEd compressed files ("file.graphmlz" extension) can be read by renaming
the file to "file.graphml.gz".
"""
reader = GraphMLReader(node_type, edge_key_type, force_multigraph)
# need to check for multiple graphs
glist = list(reader(path=path))
if len(glist) == 0:
# If no graph comes back, try looking for an incomplete header
header = b'<graphml xmlns="http://graphml.graphdrawing.org/xmlns">'
path.seek(0)
old_bytes = path.read()
new_bytes = old_bytes.replace(b"<graphml>", header)
glist = list(reader(string=new_bytes))
if len(glist) == 0:
raise nx.NetworkXError("file not successfully read as graphml")
return glist[0]
def parse_graphml(
graphml_string, node_type=str, edge_key_type=int, force_multigraph=False
):
"""Read graph in GraphML format from string.
Parameters
----------
graphml_string : string
String containing graphml information
(e.g., contents of a graphml file).
node_type: Python type (default: str)
Convert node ids to this type
edge_key_type: Python type (default: int)
Convert graphml edge ids to this type. Multigraphs use id as edge key.
Non-multigraphs add to edge attribute dict with name "id".
force_multigraph : bool (default: False)
If True, return a multigraph with edge keys. If False (the default)
return a multigraph when multiedges are in the graph.
Returns
-------
graph: NetworkX graph
If no parallel edges are found a Graph or DiGraph is returned.
Otherwise a MultiGraph or MultiDiGraph is returned.
Examples
--------
>>> G = nx.path_graph(4)
>>> linefeed = chr(10) # linefeed = \n
>>> s = linefeed.join(nx.generate_graphml(G))
>>> H = nx.parse_graphml(s)
Notes
-----
Default node and edge attributes are not propagated to each node and edge.
They can be obtained from `G.graph` and applied to node and edge attributes
if desired using something like this:
>>> default_color = G.graph["node_default"]["color"] # doctest: +SKIP
>>> for node, data in G.nodes(data=True): # doctest: +SKIP
... if "color" not in data:
... data["color"] = default_color
>>> default_color = G.graph["edge_default"]["color"] # doctest: +SKIP
>>> for u, v, data in G.edges(data=True): # doctest: +SKIP
... if "color" not in data:
... data["color"] = default_color
    This implementation does not support mixed graphs (directed and undirected
edges together), hypergraphs, nested graphs, or ports.
For multigraphs the GraphML edge "id" will be used as the edge
    key. If not specified then the "key" attribute will be used. If
there is no "key" attribute a default NetworkX multigraph edge key
will be provided.
"""
reader = GraphMLReader(node_type, edge_key_type, force_multigraph)
# need to check for multiple graphs
glist = list(reader(string=graphml_string))
if len(glist) == 0:
# If no graph comes back, try looking for an incomplete header
header = '<graphml xmlns="http://graphml.graphdrawing.org/xmlns">'
new_string = graphml_string.replace("<graphml>", header)
glist = list(reader(string=new_string))
if len(glist) == 0:
raise nx.NetworkXError("file not successfully read as graphml")
return glist[0]
class GraphML:
NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
# xmlns:y="http://www.yworks.com/xml/graphml"
NS_Y = "http://www.yworks.com/xml/graphml"
SCHEMALOCATION = " ".join(
[
"http://graphml.graphdrawing.org/xmlns",
"http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd",
]
)
def construct_types(self):
types = [
(int, "integer"), # for Gephi GraphML bug
(str, "yfiles"),
(str, "string"),
(int, "int"),
(int, "long"),
(float, "float"),
(float, "double"),
(bool, "boolean"),
]
# These additions to types allow writing numpy types
try:
import numpy as np
        except ImportError:
pass
else:
# prepend so that python types are created upon read (last entry wins)
types = [
(np.float64, "float"),
(np.float32, "float"),
(np.float16, "float"),
(np.float_, "float"),
(np.int_, "int"),
(np.int8, "int"),
(np.int16, "int"),
(np.int32, "int"),
(np.int64, "int"),
(np.uint8, "int"),
(np.uint16, "int"),
(np.uint32, "int"),
(np.uint64, "int"),
(np.int_, "int"),
(np.intc, "int"),
(np.intp, "int"),
] + types
self.xml_type = dict(types)
self.python_type = dict(reversed(a) for a in types)
# This page says that data types in GraphML follow Java(TM).
# http://graphml.graphdrawing.org/primer/graphml-primer.html#AttributesDefinition
# true and false are the only boolean literals:
# http://en.wikibooks.org/wiki/Java_Programming/Literals#Boolean_Literals
convert_bool = {
# We use data.lower() in actual use.
"true": True,
"false": False,
# Include integer strings for convenience.
"0": False,
0: False,
"1": True,
1: True,
}
class GraphMLWriter(GraphML):
def __init__(
self,
graph=None,
encoding="utf-8",
prettyprint=True,
infer_numeric_types=False,
named_key_ids=False,
):
self.construct_types()
from xml.etree.ElementTree import Element
self.myElement = Element
self.infer_numeric_types = infer_numeric_types
self.prettyprint = prettyprint
self.named_key_ids = named_key_ids
self.encoding = encoding
self.xml = self.myElement(
"graphml",
{
"xmlns": self.NS_GRAPHML,
"xmlns:xsi": self.NS_XSI,
"xsi:schemaLocation": self.SCHEMALOCATION,
},
)
self.keys = {}
self.attributes = defaultdict(list)
self.attribute_types = defaultdict(set)
if graph is not None:
self.add_graph_element(graph)
def __str__(self):
from xml.etree.ElementTree import tostring
if self.prettyprint:
self.indent(self.xml)
s = tostring(self.xml).decode(self.encoding)
return s
def attr_type(self, name, scope, value):
"""Infer the attribute type of data named name. Currently this only
supports inference of numeric types.
If self.infer_numeric_types is false, type is used. Otherwise, pick the
most general of types found across all values with name and scope. This
means edges with data named 'weight' are treated separately from nodes
with data named 'weight'.
"""
if self.infer_numeric_types:
types = self.attribute_types[(name, scope)]
if len(types) > 1:
types = {self.xml_type[t] for t in types}
if "string" in types:
return str
elif "float" in types or "double" in types:
return float
else:
return int
else:
return list(types)[0]
else:
return type(value)
def get_key(self, name, attr_type, scope, default):
keys_key = (name, attr_type, scope)
try:
return self.keys[keys_key]
except KeyError:
if self.named_key_ids:
new_id = name
else:
new_id = f"d{len(list(self.keys))}"
self.keys[keys_key] = new_id
key_kwargs = {
"id": new_id,
"for": scope,
"attr.name": name,
"attr.type": attr_type,
}
key_element = self.myElement("key", **key_kwargs)
# add subelement for data default value if present
if default is not None:
default_element = self.myElement("default")
default_element.text = str(default)
key_element.append(default_element)
self.xml.insert(0, key_element)
return new_id
def add_data(self, name, element_type, value, scope="all", default=None):
"""
Make a data element for an edge or a node. Keep a log of the
type in the keys table.
"""
if element_type not in self.xml_type:
raise nx.NetworkXError(
f"GraphML writer does not support {element_type} as data values."
)
keyid = self.get_key(name, self.xml_type[element_type], scope, default)
data_element = self.myElement("data", key=keyid)
data_element.text = str(value)
return data_element
def add_attributes(self, scope, xml_obj, data, default):
"""Appends attribute data to edges or nodes, and stores type information
to be added later. See add_graph_element.
"""
for k, v in data.items():
self.attribute_types[(str(k), scope)].add(type(v))
self.attributes[xml_obj].append([k, v, scope, default.get(k)])
def add_nodes(self, G, graph_element):
default = G.graph.get("node_default", {})
for node, data in G.nodes(data=True):
node_element = self.myElement("node", id=str(node))
self.add_attributes("node", node_element, data, default)
graph_element.append(node_element)
def add_edges(self, G, graph_element):
if G.is_multigraph():
for u, v, key, data in G.edges(data=True, keys=True):
edge_element = self.myElement(
"edge", source=str(u), target=str(v), id=str(key)
)
default = G.graph.get("edge_default", {})
self.add_attributes("edge", edge_element, data, default)
graph_element.append(edge_element)
else:
for u, v, data in G.edges(data=True):
edge_element = self.myElement("edge", source=str(u), target=str(v))
default = G.graph.get("edge_default", {})
self.add_attributes("edge", edge_element, data, default)
graph_element.append(edge_element)
def add_graph_element(self, G):
"""
Serialize graph G in GraphML to the stream.
"""
if G.is_directed():
default_edge_type = "directed"
else:
default_edge_type = "undirected"
graphid = G.graph.pop("id", None)
if graphid is None:
graph_element = self.myElement("graph", edgedefault=default_edge_type)
else:
graph_element = self.myElement(
"graph", edgedefault=default_edge_type, id=graphid
)
default = {}
data = {
k: v
for (k, v) in G.graph.items()
if k not in ["node_default", "edge_default"]
}
self.add_attributes("graph", graph_element, data, default)
self.add_nodes(G, graph_element)
self.add_edges(G, graph_element)
# self.attributes contains a mapping from XML Objects to a list of
# data that needs to be added to them.
# We postpone processing in order to do type inference/generalization.
# See self.attr_type
for (xml_obj, data) in self.attributes.items():
for (k, v, scope, default) in data:
xml_obj.append(
self.add_data(
str(k), self.attr_type(k, scope, v), str(v), scope, default
)
)
self.xml.append(graph_element)
def add_graphs(self, graph_list):
"""Add many graphs to this GraphML document."""
for G in graph_list:
self.add_graph_element(G)
def dump(self, stream):
from xml.etree.ElementTree import ElementTree
if self.prettyprint:
self.indent(self.xml)
document = ElementTree(self.xml)
document.write(stream, encoding=self.encoding, xml_declaration=True)
def indent(self, elem, level=0):
# in-place prettyprint formatter
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class IncrementalElement:
"""Wrapper for _IncrementalWriter providing an Element like interface.
This wrapper does not intend to be a complete implementation but rather to
deal with those calls used in GraphMLWriter.
"""
def __init__(self, xml, prettyprint):
self.xml = xml
self.prettyprint = prettyprint
def append(self, element):
self.xml.write(element, pretty_print=self.prettyprint)
class GraphMLWriterLxml(GraphMLWriter):
def __init__(
self,
path,
graph=None,
encoding="utf-8",
prettyprint=True,
infer_numeric_types=False,
named_key_ids=False,
):
self.construct_types()
import lxml.etree as lxmletree
self.myElement = lxmletree.Element
self._encoding = encoding
self._prettyprint = prettyprint
self.named_key_ids = named_key_ids
self.infer_numeric_types = infer_numeric_types
self._xml_base = lxmletree.xmlfile(path, encoding=encoding)
self._xml = self._xml_base.__enter__()
self._xml.write_declaration()
        # We need an xml variable that supports insertion. This call is
        # used for adding the keys to the document.
# We will store those keys in a plain list, and then after the graph
# element is closed we will add them to the main graphml element.
self.xml = []
self._keys = self.xml
self._graphml = self._xml.element(
"graphml",
{
"xmlns": self.NS_GRAPHML,
"xmlns:xsi": self.NS_XSI,
"xsi:schemaLocation": self.SCHEMALOCATION,
},
)
self._graphml.__enter__()
self.keys = {}
self.attribute_types = defaultdict(set)
if graph is not None:
self.add_graph_element(graph)
def add_graph_element(self, G):
"""
Serialize graph G in GraphML to the stream.
"""
if G.is_directed():
default_edge_type = "directed"
else:
default_edge_type = "undirected"
graphid = G.graph.pop("id", None)
if graphid is None:
graph_element = self._xml.element("graph", edgedefault=default_edge_type)
else:
graph_element = self._xml.element(
"graph", edgedefault=default_edge_type, id=graphid
)
# gather attributes types for the whole graph
# to find the most general numeric format needed.
# Then pass through attributes to create key_id for each.
graphdata = {
k: v
for k, v in G.graph.items()
if k not in ("node_default", "edge_default")
}
node_default = G.graph.get("node_default", {})
edge_default = G.graph.get("edge_default", {})
# Graph attributes
for k, v in graphdata.items():
self.attribute_types[(str(k), "graph")].add(type(v))
for k, v in graphdata.items():
element_type = self.xml_type[self.attr_type(k, "graph", v)]
self.get_key(str(k), element_type, "graph", None)
# Nodes and data
for node, d in G.nodes(data=True):
for k, v in d.items():
self.attribute_types[(str(k), "node")].add(type(v))
for node, d in G.nodes(data=True):
for k, v in d.items():
T = self.xml_type[self.attr_type(k, "node", v)]
self.get_key(str(k), T, "node", node_default.get(k))
# Edges and data
if G.is_multigraph():
for u, v, ekey, d in G.edges(keys=True, data=True):
for k, v in d.items():
self.attribute_types[(str(k), "edge")].add(type(v))
for u, v, ekey, d in G.edges(keys=True, data=True):
for k, v in d.items():
T = self.xml_type[self.attr_type(k, "edge", v)]
self.get_key(str(k), T, "edge", edge_default.get(k))
else:
for u, v, d in G.edges(data=True):
for k, v in d.items():
self.attribute_types[(str(k), "edge")].add(type(v))
for u, v, d in G.edges(data=True):
for k, v in d.items():
T = self.xml_type[self.attr_type(k, "edge", v)]
self.get_key(str(k), T, "edge", edge_default.get(k))
# Now add attribute keys to the xml file
for key in self.xml:
self._xml.write(key, pretty_print=self._prettyprint)
# The incremental_writer writes each node/edge as it is created
incremental_writer = IncrementalElement(self._xml, self._prettyprint)
with graph_element:
self.add_attributes("graph", incremental_writer, graphdata, {})
self.add_nodes(G, incremental_writer) # adds attributes too
self.add_edges(G, incremental_writer) # adds attributes too
def add_attributes(self, scope, xml_obj, data, default):
"""Appends attribute data."""
for k, v in data.items():
data_element = self.add_data(
str(k), self.attr_type(str(k), scope, v), str(v), scope, default.get(k)
)
xml_obj.append(data_element)
def __str__(self):
return object.__str__(self)
def dump(self):
self._graphml.__exit__(None, None, None)
self._xml_base.__exit__(None, None, None)
# default is lxml if present.
write_graphml = write_graphml_lxml
class GraphMLReader(GraphML):
"""Read a GraphML document. Produces NetworkX graph objects."""
def __init__(self, node_type=str, edge_key_type=int, force_multigraph=False):
self.construct_types()
self.node_type = node_type
self.edge_key_type = edge_key_type
self.multigraph = force_multigraph # If False, test for multiedges
self.edge_ids = {} # dict mapping (u,v) tuples to edge id attributes
def __call__(self, path=None, string=None):
from xml.etree.ElementTree import ElementTree, fromstring
if path is not None:
self.xml = ElementTree(file=path)
elif string is not None:
self.xml = fromstring(string)
else:
raise ValueError("Must specify either 'path' or 'string' as kwarg")
(keys, defaults) = self.find_graphml_keys(self.xml)
for g in self.xml.findall(f"{{{self.NS_GRAPHML}}}graph"):
yield self.make_graph(g, keys, defaults)
def make_graph(self, graph_xml, graphml_keys, defaults, G=None):
# set default graph type
edgedefault = graph_xml.get("edgedefault", None)
if G is None:
if edgedefault == "directed":
G = nx.MultiDiGraph()
else:
G = nx.MultiGraph()
# set defaults for graph attributes
G.graph["node_default"] = {}
G.graph["edge_default"] = {}
for key_id, value in defaults.items():
key_for = graphml_keys[key_id]["for"]
name = graphml_keys[key_id]["name"]
python_type = graphml_keys[key_id]["type"]
if key_for == "node":
G.graph["node_default"].update({name: python_type(value)})
if key_for == "edge":
G.graph["edge_default"].update({name: python_type(value)})
# hyperedges are not supported
hyperedge = graph_xml.find(f"{{{self.NS_GRAPHML}}}hyperedge")
if hyperedge is not None:
raise nx.NetworkXError("GraphML reader doesn't support hyperedges")
# add nodes
for node_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}node"):
self.add_node(G, node_xml, graphml_keys, defaults)
# add edges
for edge_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}edge"):
self.add_edge(G, edge_xml, graphml_keys)
# add graph data
data = self.decode_data_elements(graphml_keys, graph_xml)
G.graph.update(data)
# switch to Graph or DiGraph if no parallel edges were found
if self.multigraph:
return G
G = nx.DiGraph(G) if G.is_directed() else nx.Graph(G)
# add explicit edge "id" from file as attribute in NX graph.
nx.set_edge_attributes(G, values=self.edge_ids, name="id")
return G
def add_node(self, G, node_xml, graphml_keys, defaults):
"""Add a node to the graph."""
# warn on finding unsupported ports tag
ports = node_xml.find(f"{{{self.NS_GRAPHML}}}port")
if ports is not None:
warnings.warn("GraphML port tag not supported.")
# find the node by id and cast it to the appropriate type
node_id = self.node_type(node_xml.get("id"))
# get data/attributes for node
data = self.decode_data_elements(graphml_keys, node_xml)
G.add_node(node_id, **data)
# get child nodes
if node_xml.attrib.get("yfiles.foldertype") == "group":
graph_xml = node_xml.find(f"{{{self.NS_GRAPHML}}}graph")
self.make_graph(graph_xml, graphml_keys, defaults, G)
def add_edge(self, G, edge_element, graphml_keys):
"""Add an edge to the graph."""
# warn on finding unsupported ports tag
ports = edge_element.find(f"{{{self.NS_GRAPHML}}}port")
if ports is not None:
warnings.warn("GraphML port tag not supported.")
# raise error if we find mixed directed and undirected edges
directed = edge_element.get("directed")
if G.is_directed() and directed == "false":
msg = "directed=false edge found in directed graph."
raise nx.NetworkXError(msg)
if (not G.is_directed()) and directed == "true":
msg = "directed=true edge found in undirected graph."
raise nx.NetworkXError(msg)
source = self.node_type(edge_element.get("source"))
target = self.node_type(edge_element.get("target"))
data = self.decode_data_elements(graphml_keys, edge_element)
# GraphML stores edge ids as an attribute
# NetworkX uses them as keys in multigraphs too if no key
# attribute is specified
edge_id = edge_element.get("id")
if edge_id:
# self.edge_ids is used by `make_graph` method for non-multigraphs
self.edge_ids[source, target] = edge_id
try:
edge_id = self.edge_key_type(edge_id)
except ValueError: # Could not convert.
pass
else:
edge_id = data.get("key")
if G.has_edge(source, target):
# mark this as a multigraph
self.multigraph = True
# Use add_edges_from to avoid error with add_edge when `'key' in data`
# Note there is only one edge here...
G.add_edges_from([(source, target, edge_id, data)])
def decode_data_elements(self, graphml_keys, obj_xml):
"""Use the key information to decode the data XML if present."""
data = {}
for data_element in obj_xml.findall(f"{{{self.NS_GRAPHML}}}data"):
key = data_element.get("key")
try:
data_name = graphml_keys[key]["name"]
data_type = graphml_keys[key]["type"]
except KeyError as e:
raise nx.NetworkXError(f"Bad GraphML data: no key {key}") from e
text = data_element.text
# assume anything with subelements is a yfiles extension
if text is not None and len(list(data_element)) == 0:
if data_type == bool:
# Ignore cases.
# http://docs.oracle.com/javase/6/docs/api/java/lang/
# Boolean.html#parseBoolean%28java.lang.String%29
data[data_name] = self.convert_bool[text.lower()]
else:
data[data_name] = data_type(text)
elif len(list(data_element)) > 0:
# Assume yfiles as subelements, try to extract node_label
node_label = None
for node_type in ["ShapeNode", "SVGNode", "ImageNode"]:
pref = f"{{{self.NS_Y}}}{node_type}/{{{self.NS_Y}}}"
geometry = data_element.find(f"{pref}Geometry")
if geometry is not None:
data["x"] = geometry.get("x")
data["y"] = geometry.get("y")
if node_label is None:
node_label = data_element.find(f"{pref}NodeLabel")
if node_label is not None:
data["label"] = node_label.text
                # check all the different types of edges available in yEd.
for e in [
"PolyLineEdge",
"SplineEdge",
"QuadCurveEdge",
"BezierEdge",
"ArcEdge",
]:
pref = f"{{{self.NS_Y}}}{e}/{{{self.NS_Y}}}"
edge_label = data_element.find(f"{pref}EdgeLabel")
if edge_label is not None:
break
if edge_label is not None:
data["label"] = edge_label.text
return data
def find_graphml_keys(self, graph_element):
"""Extracts all the keys and key defaults from the xml."""
graphml_keys = {}
graphml_key_defaults = {}
for k in graph_element.findall(f"{{{self.NS_GRAPHML}}}key"):
attr_id = k.get("id")
attr_type = k.get("attr.type")
attr_name = k.get("attr.name")
yfiles_type = k.get("yfiles.type")
if yfiles_type is not None:
attr_name = yfiles_type
attr_type = "yfiles"
if attr_type is None:
attr_type = "string"
warnings.warn(f"No key type for id {attr_id}. Using string")
if attr_name is None:
raise nx.NetworkXError(f"Unknown key for id {attr_id}.")
graphml_keys[attr_id] = {
"name": attr_name,
"type": self.python_type[attr_type],
"for": k.get("for"),
}
# check for "default" sub-element of key element
default = k.find(f"{{{self.NS_GRAPHML}}}default")
if default is not None:
# Handle default values identically to data element values
python_type = graphml_keys[attr_id]["type"]
if python_type == bool:
graphml_key_defaults[attr_id] = self.convert_bool[
default.text.lower()
]
else:
graphml_key_defaults[attr_id] = python_type(default.text)
return graphml_keys, graphml_key_defaults
|
the-stack_0_8821 | """Implementations of Real NVP."""
import torch
from torch import nn
from nflows.transforms.base import Transform
from nflows.utils import torchutils
class RealNVP(Transform):
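    """A single affine coupling layer in the style of Real NVP (Dinh et al.).

    The first ``d`` coordinates pass through unchanged and parameterize, via the
    two small MLPs below, a scale ``s`` and shift ``t`` applied to the remaining
    ``D - d`` coordinates; the log-|det Jacobian| is therefore ``sum(s)``.
    """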
def __init__(self, D, d, hidden):
assert d > 0
assert D > d
assert hidden > 0
super().__init__()
self.D = D
self.d = d
self.hidden = hidden
self.s_net = nn.Sequential(
nn.Linear(d, hidden),
nn.LeakyReLU(),
nn.Linear(hidden, D - d)
)
self.t_net = nn.Sequential(
nn.Linear(d, hidden),
nn.LeakyReLU(),
nn.Linear(hidden, D - d)
)
def forward(self, x, context=None):
x1, x2 = x[:, :self.d], x[:, self.d:]
s = self.s_net(x1)
t = self.t_net(x1)
z1 = x1
z2 = x2 * torch.exp(s) + t
z = torch.cat([z1, z2], dim=-1)
logabsdet = torchutils.sum_except_batch(s, num_batch_dims=1)
return z, logabsdet
def inverse(self, z, context=None):
z1, z2 = z[:, :self.d], z[:, self.d:]
s = self.s_net(z1)
t = self.t_net(z1)
x1 = z1
x2 = (z2 - t) * torch.exp(-s)
logabsdet = -torchutils.sum_except_batch(s, num_batch_dims=1)
return torch.cat([x1, x2], -1), logabsdet
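# Minimal usage sketch (not part of the original module): build one coupling
# layer with arbitrary example sizes and check that inverse(forward(x))
# recovers x and that the two log-determinants cancel.
if __name__ == "__main__":
    flow = RealNVP(D=4, d=2, hidden=16)
    x = torch.randn(8, 4)
    z, logabsdet = flow.forward(x)
    x_rec, inv_logabsdet = flow.inverse(z)
    print(torch.allclose(x, x_rec, atol=1e-5))        # expected: True
    print(torch.allclose(logabsdet, -inv_logabsdet))  # expected: True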
|
the-stack_0_8822 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
from spack.operating_systems.mac_os import macos_version
import os
import sys
MACOS_VERSION = macos_version() if sys.platform == 'darwin' else None
class Qt(Package):
"""Qt is a comprehensive cross-platform C++ application framework."""
homepage = 'http://qt.io'
# Alternative location 'http://download.qt.io/official_releases/qt/'
url = 'http://download.qt.io/archive/qt/5.7/5.7.0/single/qt-everywhere-opensource-src-5.7.0.tar.gz'
list_url = 'http://download.qt.io/archive/qt/'
list_depth = 3
phases = ['configure', 'build', 'install']
version('5.13.1', sha256='adf00266dc38352a166a9739f1a24a1e36f1be9c04bf72e16e142a256436974e')
version('5.12.5', sha256='a2299e21db7767caf98242767bffb18a2a88a42fee2d6a393bedd234f8c91298')
version('5.12.2', sha256='59b8cb4e728450b21224dcaaa40eb25bafc5196b6988f2225c394c6b7f881ff5')
version('5.11.3', sha256='859417642713cee2493ee3646a7fee782c9f1db39e41d7bb1322bba0c5f0ff4d')
version('5.11.2', sha256='c6104b840b6caee596fa9a35bc5f57f67ed5a99d6a36497b6fe66f990a53ca81')
version('5.10.0', sha256='936d4cf5d577298f4f9fdb220e85b008ae321554a5fcd38072dc327a7296230e')
version('5.9.1', sha256='7b41a37d4fe5e120cdb7114862c0153f86c07abbec8db71500443d2ce0c89795')
version('5.9.0', sha256='f70b5c66161191489fc13c7b7eb69bf9df3881596b183e7f6d94305a39837517')
version('5.8.0', sha256='9dc5932307ae452855863f6405be1f7273d91173dcbe4257561676a599bd58d3')
version('5.7.1', sha256='c86684203be61ae7b33a6cf33c23ec377f246d697bd9fb737d16f0ad798f89b7')
version('5.7.0', sha256='4661905915d6265243e17fe59852930a229cf5b054ce5af5f48b34da9112ab5f')
version('5.5.1', sha256='c7fad41a009af1996b62ec494e438aedcb072b3234b2ad3eeea6e6b1f64be3b3')
version('5.4.2', sha256='cfc768c55f0a0cd232bed914a9022528f8f2e50cb010bf0e4f3f62db3dfa17bd')
version('5.4.0', sha256='1739633424bde3d89164ae6ff1c5c913be38b9997e451558ef873aac4bbc408a')
version('5.3.2', sha256='c8d3fd2ead30705c6673c5e4af6c6f3973346b4fb2bd6079c7be0943a5b0282d')
version('5.2.1', sha256='84e924181d4ad6db00239d87250cc89868484a14841f77fb85ab1f1dbdcd7da1')
version('4.8.7', sha256='e2882295097e47fe089f8ac741a95fef47e0a73a3f3cdf21b56990638f626ea0')
version('4.8.6', sha256='8b14dd91b52862e09b8e6a963507b74bc2580787d171feda197badfa7034032c')
version('4.8.5', sha256='eb728f8268831dc4373be6403b7dd5d5dde03c169ad6882f9a8cb560df6aa138')
version('3.3.8b', sha256='1b7a1ff62ec5a9cb7a388e2ba28fda6f960b27f27999482ebeceeadb72ac9f6e')
# Add patch for compile issues with qt3 found with use in the
# OpenSpeedShop project
variant('krellpatch', default=False,
description="Build with openspeedshop based patch.")
variant('gtk', default=False,
description="Build with gtkplus.")
variant('webkit', default=False,
description="Build the Webkit extension")
variant('examples', default=False,
description="Build examples.")
variant('framework', default=bool(MACOS_VERSION),
description="Build as a macOS Framework package.")
variant('tools', default=True,
description="Build tools, including Qt Designer.")
variant('dbus', default=False,
description="Build with D-Bus support.")
variant('phonon', default=False,
description="Build with phonon support.")
variant('opengl', default=False,
description="Build with OpenGL support.")
variant('sql', default=True,
description="Build with SQL support.")
variant('shared', default=True,
description='Build shared libraries.')
variant('ssl', default=True,
description="Build with OpenSSL support.")
variant('freetype', default='spack', description='Freetype2 support',
values=('spack', 'qt', 'none'), multi=False)
# fix installation of pkgconfig files
# see https://github.com/Homebrew/homebrew-core/pull/5951
patch('restore-pc-files.patch', when='@5.9:5.11 platform=darwin')
patch('qt3accept.patch', when='@3.3.8b')
patch('qt3krell.patch', when='@3.3.8b+krellpatch')
patch('qt3ptrdiff.patch', when='@3.3.8b')
# see https://bugreports.qt.io/browse/QTBUG-57656
patch('QTBUG-57656.patch', when='@5.8.0')
# see https://bugreports.qt.io/browse/QTBUG-58038
patch('QTBUG-58038.patch', when='@5.8.0')
# https://github.com/xboxdrv/xboxdrv/issues/188
patch('btn_trigger_happy.patch', when='@5.7.0:')
# https://github.com/spack/spack/issues/1517
patch('qt5-pcre.patch', when='@5:')
patch('qt4-pcre-include-conflict.patch', when='@4.8.6')
patch('qt4-tools.patch', when='@4+tools')
if not MACOS_VERSION:
# Allow Qt's configure script to build the webkit option with more
# recent versions of gcc.
# https://github.com/spack/spack/issues/9205
# https://github.com/spack/spack/issues/9209
patch('qt4-gcc-and-webkit.patch', when='@4:4.8.6')
patch('qt4-gcc-and-webkit-487.patch', when='@4.8.7')
else:
patch('qt4-mac.patch', when='@4.8.7')
# Fix build failure with newer versions of GCC
patch('https://github.com/qt/qtbase/commit/a52d7861edfb5956de38ba80015c4dd0b596259b.patch',
sha256='c49b228c27e3ad46ec3af4bac0e9985af5b5b28760f238422d32e14f98e49b1e',
working_dir='qtbase',
when='@5.10:5.12.0 %gcc@9:')
# Fix build of QT4 with GCC 9
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=925811
patch("qt4-gcc9-qforeach.patch", when="@4:4.999 %gcc@9")
# https://bugreports.qt.io/browse/QTBUG-74196
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89585
patch('qt4-gcc8.3-asm-volatile-fix.patch', when='@4')
patch('qt5-gcc8.3-asm-volatile-fix.patch', when='@5.0.0:5.12.1')
# patch overflow builtins
patch('qt5_11-intel-overflow.patch', when='@5.11')
patch('qt5_12-intel-overflow.patch', when='@5.12:')
# Build-only dependencies
depends_on("pkgconfig", type='build')
depends_on("flex", when='+webkit', type='build')
depends_on("bison", when='+webkit', type='build')
depends_on("python", when='@5.7.0:', type='build')
# Dependencies, then variant- and version-specific dependencies
depends_on("icu4c")
depends_on("jpeg")
depends_on("libmng")
depends_on("libtiff")
depends_on("libxml2")
depends_on("zlib")
depends_on("freetype", when='freetype=spack')
depends_on("gperf", when='+webkit')
depends_on("gtkplus", when='+gtk')
depends_on("openssl", when='+ssl')
depends_on("sqlite", when='+sql', type=('build', 'run'))
depends_on("sqlite+column_metadata", when='+sql%intel', type=('build', 'run'))
depends_on("[email protected]", when='@3')
depends_on("pcre+multibyte", when='@5.0:5.8')
depends_on("inputproto", when='@:5.8')
depends_on("openssl@:1.0.999", when='@:5.9+ssl~krellpatch')
depends_on("glib", when='@4:')
depends_on("libpng", when='@4:')
depends_on("dbus", when='@4:+dbus')
depends_on("[email protected]:", when='@4:+opengl')
depends_on("harfbuzz", when='@5:')
depends_on("double-conversion", when='@5.7:')
depends_on("pcre2+multibyte", when='@5.9:')
# Non-macOS dependencies and special macOS constraints
if MACOS_VERSION is None:
depends_on("fontconfig", when='freetype=spack')
depends_on("libx11")
depends_on("libxcb")
depends_on("libxkbcommon")
depends_on("xcb-util-image")
depends_on("xcb-util-keysyms")
depends_on("xcb-util-renderutil")
depends_on("xcb-util-wm")
depends_on("libxext")
depends_on("libxrender")
conflicts('+framework',
msg="QT cannot be built as a framework except on macOS.")
else:
conflicts('platform=darwin', when='@4.8.6',
msg="QT 4 for macOS is only patched for 4.8.7")
use_xcode = True
# Mapping for compilers in the QT 'mkspecs'
compiler_mapping = {'intel': 'icc', 'clang': 'clang-libc++', 'gcc': 'g++'}
def url_for_version(self, version):
# URL keeps getting more complicated with every release
url = self.list_url
if version >= Version('4.0'):
url += str(version.up_to(2)) + '/'
else:
url += str(version.up_to(1)) + '/'
if version >= Version('4.8'):
url += str(version) + '/'
if version >= Version('5'):
url += 'single/'
url += 'qt-'
if version >= Version('4.6'):
url += 'everywhere-'
elif version >= Version('2.1'):
url += 'x11-'
if version >= Version('5.10.0'):
url += 'src-'
elif version >= Version('4.0'):
url += 'opensource-src-'
elif version >= Version('3'):
url += 'free-'
# 5.9 only has xz format. From 5.2.1 -> 5.8.0 .gz or .xz were possible
if version >= Version('5.9'):
url += str(version) + '.tar.xz'
else:
url += str(version) + '.tar.gz'
return url
def setup_build_environment(self, env):
env.set('MAKEFLAGS', '-j{0}'.format(make_jobs))
def setup_run_environment(self, env):
env.set('QTDIR', self.prefix)
def setup_dependent_build_environment(self, env, dependent_spec):
env.set('QTDIR', self.prefix)
def setup_dependent_package(self, module, dependent_spec):
module.qmake = Executable(join_path(self.spec.prefix.bin, 'qmake'))
@when('@4 platform=darwin')
def patch(self):
ogl = self.spec['opengl'] if '+opengl' in self.spec else None
deployment_target = str(MACOS_VERSION.up_to(2))
patches = {
'MACOSX_DEPLOYMENT_TARGET': deployment_target,
'PREFIX': self.prefix,
'OPENGL_INCDIR': ogl.prefix.include if ogl else "",
'OPENGL_LIBS': ogl.libs.ld_flags if ogl else "",
}
def repl(match):
# Replace the original config variable value with the one chosen
# here if it is mentioned in 'patches'; otherwise return the
# original value.
return patches.get(match.group(1), match.group(0))
files_to_filter = [
"configure",
"mkspecs/common/mac.conf",
"mkspecs/common/unix.conf",
"mkspecs/common/gcc-base-macx.conf",
"mkspecs/common/gcc-base.conf",
"qmake/generators/unix/unixmake.cpp",
"qmake/qmake.pri",
"src/tools/bootstrap/bootstrap.pro"
]
if '%clang' in self.spec:
files_to_filter += [
"mkspecs/unsupported/macx-clang-libc++/qmake.conf",
"mkspecs/common/clang.conf"
]
elif '%gcc' in self.spec:
files_to_filter += [
"mkspecs/common/g++-macx.conf",
"mkspecs/darwin-g++/qmake.conf"
]
# Filter inserted configure variables
filter_file(r'@([a-zA-Z0-9_]+)@', repl, *files_to_filter)
# Remove debug build
files_to_filter = [
"src/3rdparty/webkit/Source/WebKit.pri",
"src/3rdparty/webkit/Source/WebKit/qt/declarative/declarative.pro",
"src/imports/qimportbase.pri",
"src/plugins/qpluginbase.pri",
"src/qbase.pri",
"tools/designer/src/components/lib/lib.pro",
"tools/designer/src/lib/lib.pro",
"tools/designer/src/plugins/activeqt/activeqt.pro",
"tools/designer/src/plugins/plugins.pri",
"tools/designer/src/uitools/uitools.pro",
]
filter_file(r'(\+=.*)debug_and_release', r'\1', *files_to_filter)
@when('@4') # *NOT* darwin/mac
def patch(self):
# Fix qmake compilers in the default mkspec
filter_file('^QMAKE_CC .*', 'QMAKE_CC = cc',
'mkspecs/common/g++-base.conf')
filter_file('^QMAKE_CXX .*', 'QMAKE_CXX = c++',
'mkspecs/common/g++-base.conf')
# Necessary to build with GCC 6 and other modern compilers
# http://stackoverflow.com/questions/10354371/
filter_file('(^QMAKE_CXXFLAGS .*)', r'\1 -std=gnu++98',
'mkspecs/common/gcc-base.conf')
filter_file('^QMAKE_LFLAGS_NOUNDEF .*', 'QMAKE_LFLAGS_NOUNDEF = ',
'mkspecs/common/g++-unix.conf')
@when('@5')
def patch(self):
# Fix qmake compilers in the default mkspec
filter_file('^QMAKE_CC .*', 'QMAKE_CC = cc',
'qtbase/mkspecs/common/g++-base.conf')
filter_file('^QMAKE_CXX .*', 'QMAKE_CXX = c++',
'qtbase/mkspecs/common/g++-base.conf')
filter_file('^QMAKE_LFLAGS_NOUNDEF .*', 'QMAKE_LFLAGS_NOUNDEF = ',
'qtbase/mkspecs/common/g++-unix.conf')
@property
def common_config_args(self):
# incomplete list is here http://doc.qt.io/qt-5/configure-options.html
        config_args = [
            '-prefix', self.prefix,
            '-v',
            '-opensource',
            '-{0}opengl'.format('' if '+opengl' in self.spec else 'no-'),
            '-release',
            '-confirm-license',
'-optimized-qmake',
'-no-pch',
]
if self.spec.variants['freetype'].value == 'spack':
config_args.extend([
'-system-freetype',
'-I{0}/freetype2'.format(self.spec['freetype'].prefix.include)
])
if not MACOS_VERSION:
config_args.append('-fontconfig')
elif self.spec.variants['freetype'].value == 'qt':
config_args.append('-qt-freetype')
else:
config_args.append('-no-freetype')
        if '+ssl' in self.spec:
            openssl = self.spec['openssl']
            config_args.extend([
                '-openssl-linked',
                openssl.libs.search_flags,
                openssl.headers.include_flags,
            ])
        else:
            config_args.append('-no-openssl')
if '+sql' in self.spec:
sqlite = self.spec['sqlite']
config_args.extend([
'-system-sqlite',
'-R', sqlite.prefix.lib,
])
else:
comps = ['db2', 'ibase', 'oci', 'tds', 'mysql', 'odbc', 'psql',
'sqlite', 'sqlite2']
config_args.extend("-no-sql-" + component for component in comps)
if '+shared' in self.spec:
config_args.append('-shared')
else:
config_args.append('-static')
if self.spec.satisfies('@5:'):
pcre = self.spec['pcre'] if self.spec.satisfies('@5.0:5.8') \
else self.spec['pcre2']
harfbuzz = self.spec['harfbuzz']
config_args.extend([
'-system-harfbuzz',
harfbuzz.libs.search_flags,
harfbuzz.headers.include_flags,
'-system-pcre',
pcre.libs.search_flags,
pcre.headers.include_flags
])
if self.spec.satisfies('@5.7:'):
dc = self.spec['double-conversion']
config_args.extend([
'-system-doubleconversion',
dc.libs.search_flags,
dc.headers.include_flags
])
if '@:5.7.1' in self.spec:
config_args.append('-no-openvg')
else:
# FIXME: those could work for other versions
png = self.spec['libpng']
config_args.append('-system-libpng')
if not png.external:
config_args.extend([
png.libs.search_flags,
png.headers.include_flags
])
jpeg = self.spec['jpeg']
config_args.append('-system-libjpeg')
if not jpeg.external:
config_args.extend([
jpeg.libs.search_flags,
jpeg.headers.include_flags,
])
zlib = self.spec['zlib']
config_args.append('-system-zlib')
if not zlib.external:
config_args.extend([
zlib.libs.search_flags,
zlib.headers.include_flags
])
if '@:5.7.0' in self.spec:
config_args.extend([
# NIS is deprecated in more recent glibc,
# but qt-5.7.1 does not recognize this option
'-no-nis',
])
if '~examples' in self.spec:
config_args.extend(['-nomake', 'examples'])
if '~tools' in self.spec:
config_args.extend(['-nomake', 'tools'])
if '+dbus' in self.spec:
dbus = self.spec['dbus'].prefix
config_args.append('-dbus-linked')
config_args.append('-I%s/dbus-1.0/include' % dbus.lib)
config_args.append('-I%s/dbus-1.0' % dbus.include)
config_args.append('-L%s' % dbus.lib)
else:
config_args.append('-no-dbus')
if MACOS_VERSION:
config_args.append('-{0}framework'.format(
'' if '+framework' in self.spec else 'no-'))
# Note: QT defaults to the following compilers
# QT4 mac: gcc
# QT5 mac: clang
# linux: gcc
# In QT4, unsupported compilers lived under an 'unsupported'
# subdirectory but are now in the main platform directory.
spec = self.spec
cname = spec.compiler.name
cname = self.compiler_mapping.get(cname, cname)
is_new_qt = spec.satisfies('@5:')
platform = None
if MACOS_VERSION:
if is_new_qt and cname != "clang-libc++":
platform = 'macx-' + cname
elif not is_new_qt and cname != "g++":
platform = 'unsupported/macx-' + cname
elif cname != 'g++':
if is_new_qt:
platform = 'linux-' + cname
else:
platform = 'unsupported/linux-' + cname
if platform is not None:
config_args.extend(['-platform', platform])
return config_args
# Don't disable all the database drivers, but should
# really get them into spack at some point.
@when('@3')
def configure(self, spec, prefix):
# A user reported that this was necessary to link Qt3 on ubuntu.
# However, if LD_LIBRARY_PATH is not set the qt build fails, check
# and set LD_LIBRARY_PATH if not set, update if it is set.
if os.environ.get('LD_LIBRARY_PATH'):
os.environ['LD_LIBRARY_PATH'] += os.pathsep + os.getcwd() + '/lib'
else:
os.environ['LD_LIBRARY_PATH'] = os.pathsep + os.getcwd() + '/lib'
configure('-prefix', prefix,
'-v',
'-thread',
'-shared',
'-release',
'-fast')
@when('@4')
def configure(self, spec, prefix):
config_args = self.common_config_args
config_args.extend([
'-fast',
'-no-declarative-debug',
'-{0}gtkstyle'.format('' if '+gtk' in spec else 'no-'),
'-{0}webkit'.format('' if '+webkit' in spec else 'no-'),
'-{0}phonon'.format('' if '+phonon' in spec else 'no-'),
'-arch', str(spec.target.family),
])
# Disable phonon backend until gstreamer is setup as dependency
if '+phonon' in self.spec:
config_args.append('-no-phonon-backend')
if '~examples' in self.spec:
config_args.extend(['-nomake', 'demos'])
if MACOS_VERSION:
sdkpath = which('xcrun')('--show-sdk-path', output=str).strip()
config_args.extend([
'-cocoa',
'-sdk', sdkpath])
configure(*config_args)
@when('@5')
def configure(self, spec, prefix):
config_args = self.common_config_args
version = self.version
config_args.extend([
'-no-eglfs',
'-no-directfb',
'-{0}gtk{1}'.format(
'' if '+gtk' in spec else 'no-',
'' if version >= Version('5.7') else 'style')
])
if MACOS_VERSION:
config_args.extend([
'-no-xcb-xlib',
'-no-pulseaudio',
'-no-alsa',
])
if version < Version('5.12'):
config_args.append('-no-xinput2')
else:
# Linux-only QT5 dependencies
config_args.append('-system-xcb')
if '~webkit' in spec:
config_args.extend([
'-skip',
'webengine' if version >= Version('5.7') else 'qtwebkit',
])
if spec.satisfies('@5.7'):
config_args.extend(['-skip', 'virtualkeyboard'])
if version >= Version('5.8'):
# relies on a system installed wayland, i.e. no spack package yet
# https://wayland.freedesktop.org/ubuntu16.04.html
# https://wiki.qt.io/QtWayland
config_args.extend(['-skip', 'wayland'])
if version >= Version('5.10') and '~opengl' in spec:
config_args.extend([
'-skip', 'webglplugin',
])
configure(*config_args)
def build(self, spec, prefix):
make()
def install(self, spec, prefix):
make("install")
|
the-stack_0_8824 | import time
class Control_system():
def __init__(self, kp = 1, ki = 0, kd = 0):
self.kp = kp # Proportional gain
self.ki = ki # Integral gain
        self.kd = kd # Derivative gain
self.state = 0
self.acc_error = 0 # error integral
self.der_error = 0 # error derivative
self.prev_error = 0
self.u_prev = 0
self.prev_pos = 0
self.t0 = 0
def pid(self, des, curr):
        if self.state == 0: # Start initial time on the first call
            self.state = 1
            self.t0 = time.perf_counter() # time.clock() was removed in Python 3.8
        t1 = time.perf_counter() # Current time, used to compute dt
dt = t1 - self.t0
error = des - curr # error
up = self.kp * error # actuator signal for proportional
# sigma = 1.0 # Dirty-bandwidth
# velocity = (2*sigma - dt)/(2*sigma + dt) * self.u_prev + 2/(2*sigma + dt) * (curr - prev_pos)
        ui = 0.0 # actuator signal for integral (stays 0 when ki == 0)
        ud = 0.0 # actuator signal for derivative (stays 0 when kd == 0)
        if self.ki != 0:
            self.acc_error = self.acc_error + (error + self.prev_error) * dt / 2 # trapezoidal integral of error
            ui = self.ki * self.acc_error # actuator signal for integral
        if self.kd != 0 and dt > 0:
            self.der_error = (error - self.prev_error) / dt # derivative error
            ud = self.kd * self.der_error # actuator signal for derivative
self.t0 = t1
self.prev_error = error
u = up + ui + ud
self.u_prev = u
self.prev_pos = curr
return u
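# Minimal usage sketch (not part of the original file): drive a toy first-order
# plant toward a setpoint with the controller above. The gains, the plant model
# and the loop timing are illustrative assumptions, not values from the project.
if __name__ == "__main__":
    controller = Control_system(kp=2.0, ki=0.5, kd=0.0)
    position, setpoint = 0.0, 1.0
    for _ in range(100):
        u = controller.pid(setpoint, position)
        position += 0.05 * u # crude plant: velocity proportional to the input
        time.sleep(0.01) # dt inside pid() comes from the wall clock
    print("final position:", position)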
|
the-stack_0_8826 | import scrapy # noqa: F401
import snoop
import isort # noqa: F401
from itertools import zip_longest
class SPIDER_2084(scrapy.Spider):
name = 'spider_2084'
start_urls = ["https://leodido.dev/demystifying-profraw/"]
@snoop
def parse(self, response):
srch_titles = response.xpath("//h1/text()").getall()
srch_links = response.xpath("//a/@href").getall()
srch_content = response.xpath("//p/text()").getall()
srch_images = response.xpath("//img/@src").getall()
for item in zip_longest(srch_titles, srch_links, srch_content, srch_images, fillvalue='missing'):
results = {
"title": item[0],
"links": item[1],
"content": item[2],
"images": item[3],
}
yield results
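# Usage note (not part of the original file): the spider can be run standalone,
# e.g. `scrapy runspider spider_2084.py -o results.json` (filename illustrative);
# the @snoop decorator traces each parse() call to stdout.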
|
the-stack_0_8827 | from manimlib.animation.creation import ShowCreation
from manimlib.animation.fading import FadeIn
from manimlib.animation.transform import MoveToTarget
from manimlib.animation.transform import Transform
from manimlib.constants import *
from manimlib.mobject.geometry import Arrow
from manimlib.mobject.geometry import Circle
from manimlib.mobject.geometry import Dot
from manimlib.mobject.svg.tex_mobject import TexMobject
from manimlib.mobject.types.vectorized_mobject import VGroup
from manimlib.scene.scene import Scene
import itertools as it
import numpy as np
class CountingScene(Scene):
CONFIG = {
"digit_place_colors": [YELLOW, MAROON_B, RED, GREEN, BLUE, PURPLE_D],
"counting_dot_starting_position": (FRAME_X_RADIUS - 1) * RIGHT + (FRAME_Y_RADIUS - 1) * UP,
"count_dot_starting_radius": 0.5,
"dot_configuration_height": 2,
"ones_configuration_location": UP + 2 * RIGHT,
"num_scale_factor": 2,
"num_start_location": 2 * DOWN,
}
def setup(self):
self.dots = VGroup()
self.number = 0
self.max_place = 0
self.number_mob = VGroup(TexMobject(str(self.number)))
self.number_mob.scale(self.num_scale_factor)
self.number_mob.shift(self.num_start_location)
self.dot_templates = []
self.dot_template_iterators = []
self.curr_configurations = []
self.arrows = VGroup()
self.add(self.number_mob)
def get_template_configuration(self, place):
# This should probably be replaced for non-base-10 counting scenes
down_right = (0.5) * RIGHT + (np.sqrt(3) / 2) * DOWN
result = []
for down_right_steps in range(5):
for left_steps in range(down_right_steps):
result.append(
down_right_steps * down_right + left_steps * LEFT
)
return reversed(result[:self.get_place_max(place)])
def get_dot_template(self, place):
# This should be replaced for non-base-10 counting scenes
dots = VGroup(*[
Dot(
point,
radius=0.25,
fill_opacity=0,
stroke_width=2,
stroke_color=WHITE,
)
for point in self.get_template_configuration(place)
])
dots.set_height(self.dot_configuration_height)
return dots
def add_configuration(self):
new_template = self.get_dot_template(len(self.dot_templates))
new_template.move_to(self.ones_configuration_location)
left_vect = (new_template.get_width() + LARGE_BUFF) * LEFT
new_template.shift(
left_vect * len(self.dot_templates)
)
self.dot_templates.append(new_template)
self.dot_template_iterators.append(
it.cycle(new_template)
)
self.curr_configurations.append(VGroup())
def count(self, max_val, run_time_per_anim=1):
for x in range(max_val):
self.increment(run_time_per_anim)
def increment(self, run_time_per_anim=1):
moving_dot = Dot(
self.counting_dot_starting_position,
radius=self.count_dot_starting_radius,
color=self.digit_place_colors[0],
)
moving_dot.generate_target()
moving_dot.set_fill(opacity=0)
kwargs = {
"run_time": run_time_per_anim
}
continue_rolling_over = True
first_move = True
place = 0
while continue_rolling_over:
added_anims = []
if first_move:
added_anims += self.get_digit_increment_animations()
first_move = False
moving_dot.target.replace(
next(self.dot_template_iterators[place])
)
self.play(MoveToTarget(moving_dot), *added_anims, **kwargs)
self.curr_configurations[place].add(moving_dot)
if len(self.curr_configurations[place].split()) == self.get_place_max(place):
full_configuration = self.curr_configurations[place]
self.curr_configurations[place] = VGroup()
place += 1
center = full_configuration.get_center_of_mass()
radius = 0.6 * max(
full_configuration.get_width(),
full_configuration.get_height(),
)
circle = Circle(
radius=radius,
stroke_width=0,
fill_color=self.digit_place_colors[place],
fill_opacity=0.5,
)
circle.move_to(center)
moving_dot = VGroup(circle, full_configuration)
moving_dot.generate_target()
moving_dot[0].set_fill(opacity=0)
else:
continue_rolling_over = False
def get_digit_increment_animations(self):
result = []
self.number += 1
is_next_digit = self.is_next_digit()
if is_next_digit:
self.max_place += 1
new_number_mob = self.get_number_mob(self.number)
new_number_mob.move_to(self.number_mob, RIGHT)
if is_next_digit:
self.add_configuration()
place = len(new_number_mob.split()) - 1
result.append(FadeIn(self.dot_templates[place]))
arrow = Arrow(
new_number_mob[place].get_top(),
self.dot_templates[place].get_bottom(),
color=self.digit_place_colors[place]
)
self.arrows.add(arrow)
result.append(ShowCreation(arrow))
result.append(Transform(
self.number_mob, new_number_mob,
lag_ratio=0.5
))
return result
def get_number_mob(self, num):
result = VGroup()
place = 0
max_place = self.max_place
while place < max_place:
digit = TexMobject(str(self.get_place_num(num, place)))
if place >= len(self.digit_place_colors):
self.digit_place_colors += self.digit_place_colors
digit.set_color(self.digit_place_colors[place])
digit.scale(self.num_scale_factor)
digit.next_to(result, LEFT, buff=SMALL_BUFF, aligned_edge=DOWN)
result.add(digit)
place += 1
return result
def is_next_digit(self):
return False
def get_place_num(self, num, place):
return 0
def get_place_max(self, place):
return 0
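# The three stubs above form the contract a counting subclass fills in:
# is_next_digit() says when the running count spills over into a new place,
# get_place_max() gives how many values a place can hold, and get_place_num()
# extracts one digit of `num`. PowerCounter below implements this for
# fixed-base systems; FactorialBase implements the factorial number system.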
class PowerCounter(CountingScene):
def is_next_digit(self):
number = self.number
while number > 1:
if number % self.base != 0:
return False
            number //= self.base
return True
def get_place_max(self, place):
return self.base
def get_place_num(self, num, place):
        return (num // (self.base ** place)) % self.base
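# Worked example (illustrative): with base 3, num = 14 decomposes as
# get_place_num(14, 0) == 2, get_place_num(14, 1) == 1, get_place_num(14, 2) == 1,
# i.e. 14 == 1*9 + 1*3 + 2, so the scene displays the digits "112".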
class CountInDecimal(PowerCounter):
CONFIG = {
"base": 10,
}
def construct(self):
for x in range(11):
self.increment()
for x in range(85):
self.increment(0.25)
for x in range(20):
self.increment()
class CountInTernary(PowerCounter):
CONFIG = {
"base": 3,
"dot_configuration_height": 1,
"ones_configuration_location": UP + 4 * RIGHT
}
def construct(self):
self.count(27)
# def get_template_configuration(self):
# return [ORIGIN, UP]
class CountInBinaryTo256(PowerCounter):
CONFIG = {
"base": 2,
"dot_configuration_height": 1,
"ones_configuration_location": UP + 5 * RIGHT
}
def construct(self):
self.count(128, 0.3)
def get_template_configuration(self):
return [ORIGIN, UP]
class FactorialBase(CountingScene):
CONFIG = {
"dot_configuration_height": 1,
"ones_configuration_location": UP + 4 * RIGHT
}
def construct(self):
self.count(30, 0.4)
def is_next_digit(self):
return self.number == self.factorial(self.max_place + 1)
def get_place_max(self, place):
return place + 2
def get_place_num(self, num, place):
        return (num // self.factorial(place + 1)) % self.get_place_max(place)
def factorial(self, n):
if (n == 1):
return 1
else:
return n * self.factorial(n - 1)
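# Illustrative render command (an assumption about the surrounding manimlib
# checkout, not part of the original file): a scene above is typically
# previewed with something like
#   manim counting_scenes.py CountInDecimal -pl
# where the module filename is hypothetical and -p/-l request a low-quality preview.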
|