ext | sha | content
---|---|---|
py | 1a36a4796c082fed9680153659dffbb132a1edf8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""RAMSES RF - a RAMSES-II protocol decoder & analyser."""
import logging
from inspect import getmembers, isclass
from sys import modules
from typing import List
from .const import Discover, __dev_mode__
from .protocol import I_, RP, RQ, W_ # noqa: F401, isort: skip
from .protocol import ( # noqa: F401, isort: skip
_0001,
_0002,
_0004,
_0005,
_0006,
_0008,
_0009,
_000A,
_000C,
_000E,
_0016,
_0100,
_0150,
_01D0,
_01E9,
_0404,
_0418,
_042F,
_0B04,
_1030,
_1060,
_1081,
_1090,
_1098,
_10A0,
_10B0,
_10E0,
_10E1,
_1100,
_1260,
_1280,
_1290,
_1298,
_12A0,
_12B0,
_12C0,
_12C8,
_12F0,
_1300,
_1F09,
_1F41,
_1FC9,
_1FD0,
_1FD4,
_2249,
_22C9,
_22D0,
_22D9,
_22F1,
_22F3,
_2309,
_2349,
_2389,
_2400,
_2401,
_2410,
_2420,
_2D49,
_2E04,
_30C9,
_3120,
_313F,
_3150,
_31D9,
_31DA,
_31E0,
_3200,
_3210,
_3220,
_3221,
_3223,
_3B00,
_3EF0,
_3EF1,
_PUZZ,
)
DEFAULT_BDR_ID = "13:000730"
DEFAULT_EXT_ID = "17:000730"
DEFAULT_THM_ID = "03:000730"
_QOS_TX_LIMIT = 12
DEV_MODE = __dev_mode__ and False
_LOGGER = logging.getLogger(__name__)
if DEV_MODE:
_LOGGER.setLevel(logging.DEBUG)
def class_by_attr(name: str, attr: str) -> dict:
"""Return a mapping of a (unique) attr of classes in a module to that class.
For example:
{"OTB": OtbGateway, "CTL": Controller}
{"RAD": RadZone, "UFH": UfhZone}
{"evohome": Evohome}
"""
return {
getattr(c[1], attr): c[1]
for c in getmembers(
modules[name],
lambda m: isclass(m) and m.__module__ == name and hasattr(m, attr),
)
}
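# Illustrative sketch (the classes and the `_SLUG` attribute are hypothetical, not
# from this module): given classes that each expose a unique attribute,
#
#   class OtbGateway: _SLUG = "OTB"
#   class Controller: _SLUG = "CTL"
#
# class_by_attr(__name__, "_SLUG") would return {"OTB": OtbGateway, "CTL": Controller}.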
def discover_decorator(fnc):
# NOTE: only need to wrap top-level entities
def wrapper(self, discover_flag=Discover.ALL) -> None:
if self._gwy.config.disable_discovery:
return
if not discover_flag:
return
return fnc(self, discover_flag=discover_flag)
return wrapper
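# Illustrative sketch (assumed usage, not shown in this file): the decorator guards an
# entity's _discover() so it becomes a no-op when discovery is disabled or no flag is set:
#
#   class SomeDevice(Entity):                       # hypothetical subclass
#       @discover_decorator
#       def _discover(self, discover_flag=Discover.ALL) -> None:
#           self._make_cmd(_0016, self.id, "00")    # hypothetical RQ probe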
class Entity:
"""The Device/Zone base class.
This class is mainly concerned with the entity's state database.
"""
def __init__(self, gwy) -> None:
self._loop = gwy._loop
self._gwy = gwy
self.id = None
self._msgs = {}
self._msgz = {}
self._qos_tx_count = 0 # the number of pkts Tx'd with no matching Rx
def _qos_function(self, pkt, reset=False) -> None:
if reset:
self._qos_tx_count = 0
return
self._qos_tx_count += 1
if self._qos_tx_count == _QOS_TX_LIMIT:
_LOGGER.warning(
f"{pkt} < Sending now deprecated for {self} "
"(consider adjusting device_id filters)"
) # TODO: take whitelist into account
def _discover(self, discover_flag=Discover.ALL) -> None:
pass
def _handle_msg(self, msg) -> None: # TODO: beware, this is a mess
if (
self._gwy.pkt_protocol is None
or msg.src.id != self._gwy.pkt_protocol._hgi80.get("device_id")
):
self._qos_function(msg._pkt, reset=True)
if msg.verb in (I_, RP):
self._msgs[msg.code] = msg
if msg.code not in self._msgz:
self._msgz[msg.code] = {msg.verb: {msg._pkt._ctx: msg}}
elif msg.verb not in self._msgz[msg.code]:
self._msgz[msg.code][msg.verb] = {msg._pkt._ctx: msg}
else:
self._msgz[msg.code][msg.verb][msg._pkt._ctx] = msg
# TODO:
# if msg.verb == RP and msg._pkt._idx in self._msgz[msg.code].get(I_, []):
# assert msg.raw_payload == self._msgz[msg.code][I_][msg._pkt._idx].raw_payload, (
# f"\r\n{msg._pkt} ({msg._pkt._idx}),"
# f"\r\n{self._msgz[msg.code][I_][msg._pkt._idx]._pkt} ({msg._pkt._idx})"
# )
# del self._msgz[msg.code][I_][msg._pkt._idx]
# elif msg.verb == I_ and msg._pkt._idx in self._msgz[msg.code].get(RP, []):
# assert msg.raw_payload == self._msgz[msg.code][RP][msg._pkt._idx].raw_payload, (
# f"\r\n{msg._pkt} ({msg._pkt._idx}),"
# f"\r\n{self._msgz[msg.code][RP][msg._pkt._idx]._pkt} ({msg._pkt._idx})"
# )
# del self._msgz[msg.code][RP][msg._pkt._idx]
@property
def _msg_db(self) -> List: # a flattened version of _msgz[code][verb][indx]
"""Return a flattened version of _msgz[code][verb][indx]."""
return [m for c in self._msgz.values() for v in c.values() for m in v.values()]
# @property
# def _pkt_db(self) -> Dict:
# """Return a flattened version of ..."""
# return {msg.dtm: msg._pkt for msg in self._msgs_db}
def _make_cmd(self, code, dest_id, payload, verb=RQ, **kwargs) -> None:
self._send_cmd(self._gwy.create_cmd(verb, dest_id, code, payload, **kwargs))
def _send_cmd(self, cmd, **kwargs) -> None:
if self._gwy.config.disable_sending:
_LOGGER.info(f"{cmd} < Sending is disabled")
return
if self._qos_tx_count > _QOS_TX_LIMIT:
_LOGGER.info(f"{cmd} < Sending is deprecated for {self}")
return
if getattr(self, "has_battery", None) and cmd.dst.id == self.id:
_LOGGER.info(f"{cmd} < Sending inadvisable for {self} (has a battery)")
cmd._source_entity = self
# self._msgs.pop(cmd.code, None) # NOTE: Cause of DHW bug
self._gwy.send_cmd(cmd)
def _msg_value(self, code, *args, **kwargs) -> dict:
if isinstance(code, (str, tuple)): # a code or a tuple of codes
return self._msg_value_code(code, *args, **kwargs)
return self._msg_value_msg(code, *args, **kwargs) # assume is a Message
def _msg_value_code(self, code, verb=None, key=None, **kwargs) -> dict:
assert (
not isinstance(code, tuple) or verb is None
), f"Unsupported: using a tuple ({code}) with a verb ({verb})"
if verb:
try:
msgs = self._msgz[code][verb]
except KeyError:
msg = None
else:
msg = max(msgs.values()) if msgs else None
elif isinstance(code, tuple):
msgs = [m for m in self._msgs.values() if m.code in code]
msg = max(msgs) if msgs else None
else:
msg = self._msgs.get(code)
return self._msg_value_msg(msg, key=key, **kwargs)
def _msg_value_msg(self, msg, key=None, zone_idx=None, domain_id=None) -> dict:
if msg is None:
return
elif msg._expired:
delete_msg(msg)
if domain_id:
idx, val = "domain_id", domain_id
elif zone_idx:
idx, val = "zone_idx", zone_idx
else:
idx = val = None
if isinstance(msg.payload, list) and idx:
msg_dict = {
k: v for d in msg.payload for k, v in d.items() if d[idx] == val
}
elif isinstance(msg.payload, list):
# TODO: this isn't ideal: e.g. a controller is being treated like a 'stat
# I 101 --:------ --:------ 12:126457 2309 006 0107D0-0207D0 # is a CTL
msg_dict = msg.payload[0]
else:
msg_dict = msg.payload
assert (
not domain_id and not zone_idx or msg_dict.get(idx) == val
), f"{msg_dict} < Coding error: key={idx}, val={val}"
if key:
return msg_dict.get(key)
return {
k: v
for k, v in msg_dict.items()
if k not in ("dhw_idx", "domain_id", "zone_idx") and k[:1] != "_"
}
@property
def _codes(self) -> dict:
return {
"codes": sorted([k for k, v in self._msgs.items()]),
}
@property
def controller(self): # -> Optional[Controller]:
"""Return the entity's controller, if known."""
return self._ctl # TODO: if the controller is not known, try to find it?
def delete_msg(msg) -> None:
"""Remove the msg from all state databases."""
entities = [msg.src]
if hasattr(msg.src, "_evo"):
entities.append(msg.src._evo)
if msg.src._evo._dhw:
entities.append(msg.src._evo._dhw)
entities.extend(msg.src._evo.zones)
# remove the msg from all the state DBs
for obj in entities:
if msg in obj._msgs.values():
del obj._msgs[msg.code]
try:
del obj._msgz[msg.code][msg.verb][msg._pkt._ctx]
except KeyError:
pass
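# For reference (derived from _handle_msg above, not original commentary): the two
# per-entity state DBs are shaped as
#   _msgs[code]                   -> most recent I/RP Message for that code
#   _msgz[code][verb][pkt._ctx]   -> Message
# which is why delete_msg() has to clear both structures.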
|
py | 1a36a58b17bbd27da976f22e77f4b926e3e41205 | from .shallow_partial import *
import sys
def get_model(model_params):
return PartialNet(model_params['width'], model_params['height'],\
model_params['ndf'], model_params['dilation'],\
model_params['norm_type'],\
model_params['upsample_type']) |
py | 1a36a6bbe0c0e64b8fb14558ee4fcde2779ef10f | from Bio import SeqIO
import numpy as np
import re
'''
For each chromosome, this file answers the following questions:
1. What is the length of the chromosome?
2. How many confirmed replication origins?
3. What is the length of each confirmed replication origin? Start? Finish?
4. What is the length of the whole genome?
'''
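# Expected on-disk layout (inferred from the paths built below, not stated in the
# original): a 'Saccharomyces-Cerevisiae' directory holding chr01.fsa .. chr16.fsa
# plus matching chr01-confirmed.fsa .. chr16-confirmed.fsa FASTA files.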
data_path = 'Saccharomyces-Cerevisiae'
genome_length = 0
stats = ''
for chromosome_number in range(1, 17):
chromosome = data_path + '/chr%02d.fsa'%chromosome_number
confirmed_origins = data_path + '/chr%02d-confirmed.fsa'%chromosome_number
fasta_sequences = SeqIO.parse(open(chromosome),'fasta')
for fasta in fasta_sequences:
name, cerevisiae_chromosome = fasta.id, str(fasta.seq)
subtitle = '## Chromosome ' + str(chromosome_number) + ' has ' + str(len(cerevisiae_chromosome)) + ' base pairs'
details = ''
genome_length += len(cerevisiae_chromosome)
origins = 0
origins_length = 0
fasta_sequences = SeqIO.parse(open(confirmed_origins),'fasta')
for fasta in fasta_sequences:
name, description, replication_origin = fasta.id, fasta.description, str(fasta.seq)
origins += 1
origins_length += len(replication_origin)
m = re.search('range=(.+?) ', description)
if m:
details += str(origins) + ". The confirmed replication origin **" + name + "** range is *" + m.group(1) + "*, with a length of **" + str(len(replication_origin)) + "** base pairs\n"
subtitle += ", and " + str(origins) + " confirmed replication origins with an average length of " + str((origins_length/origins)) + " base pairs\n"
stats += subtitle + details + "\n\n"
title = "# Saccharomayces Ceravesiae genome length is " + str(genome_length) + "\n\n"
with open('README.md', 'w') as f:
f.write(title)
f.write(stats)
print "Stats print to file successfully" |
py | 1a36a6c1808377b61d105d84d3e1d967a5819b52 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ........................................ NOTICE
#
# This file has been derived and modified from a source licensed under Apache Version 2.0.
# See files NOTICE and README.md for more details.
#
# ........................................ ******
"""Unit-test code for logtools"""
import os
import sys
import unittest
import logging
from tempfile import mkstemp
from datetime import datetime
from io import StringIO
from operator import itemgetter
from logtools import (filterbots, logfilter, geoip, logsample, logsample_weighted,
logparse, urlparse, logmerge, logplot, qps, sumstat)
from logtools.parsers import *
from logtools import logtools_config, interpolate_config, AttrDict
logging.basicConfig(level=logging.INFO)
class ConfigurationTestCase(unittest.TestCase):
def testInterpolation(self):
self.assertEqual(1, interpolate_config(1, 'bogus_sec', 'bogus_key'))
self.assertRaises(KeyError, interpolate_config, None, 'bogus_sec', 'bogus_key')
class URLParseTestCase(unittest.TestCase):
def setUp(self):
self.rows = [
"http://www.mydomain.com/my/path/myfile?myparam1=myval1&myparam2=myval2",
"http://www.mydomain2.com",
"http://www.mydomain3.com/home",
"http://fun.com/index.php?home"
]
def testUrlParse(self):
i=0
for row in urlparse(StringIO('\n'.join(self.rows)+'\n'), part='netloc'):
i+=1
self.assertEqual(i, len(self.rows), \
"Number of rows output is not equal to input size")
def testMultipleQueryParams(self):
url = "http://www.mydomain.com/my/path/myfile?myparam1=myval1&myparam2=myval2"
for row in urlparse(StringIO(url+"\n"), part='query', query_params='myparam1,myparam2'):
self.assertEqual(row[0], 'myval1', "Returned query param value was not as expected: %s" % \
row)
class ParsingTestCase(unittest.TestCase):
def setUp(self):
self.clf_rows = [
'127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326',
'127.0.0.2 - jay [10/Oct/2000:13:56:12 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326'
]
self.json_rows = [
'{"key1":"val1","key2":true,"key3":31337,"key4":null,"nested_key":[{"nested_key_1":"2"}]}'
]
self.uwsgi_rows = [
"[pid: 11216|app: 0|req: 2680/5864] 24.218.159.119 () {40 vars in 957 bytes} [Thu Jun 13 22:29:59 2013] GET /my/uri/path/?param_id=52&token=s61048gkje_l001z => generated 1813 bytes in 11 msecs (HTTP/1.1 200) 2 headers in 73 bytes (1 switches on core 0)",
"[pid: 11217|app: 0|req: 3064/5865] 10.18.50.145 () {34 vars in 382 bytes} [Thu Jun 13 22:30:00 2013] GET / => generated 8264 bytes in 9 msecs (HTTP/1.1 200) 2 headers in 73 bytes (1 switches on core 0)"
]
def testJSONParser(self):
parser = JSONParser()
for logrow in self.json_rows:
parsed = parser(logrow)
self.assertNotEqual(parsed, None, "Could not parse line: %s" % str(logrow))
def testAccessLog(self):
parser = AccessLog()
parser.set_format(format='%h %l %u %t "%r" %>s %b')
self.assertRaises(ValueError, parser, 'example for invalid format')
for logrow in self.clf_rows:
parsed = parser(logrow)
self.assertNotEqual(parsed, None, "Could not parse line: %s" % str(logrow))
def testCommonLogFormat(self):
parser = CommonLogFormat()
self.assertRaises(ValueError, parser, 'example for invalid format')
for logrow in self.clf_rows:
parsed = parser(logrow)
self.assertNotEqual(parsed, None, "Could not parse line: %s" % str(logrow))
def testuWSGIParser(self):
parser = uWSGIParser()
for logrow in self.uwsgi_rows:
parsed = parser(logrow)
self.assertNotEqual(parsed, None, "Could not parse line: %s" % logrow)
def testLogParse(self):
options = AttrDict({'parser': 'CommonLogFormat', 'field': 4, 'header': False})
fh = StringIO('\n'.join(self.clf_rows))
output = [l for l in logparse(options, None, fh)]
self.assertEqual(len(output), len(self.clf_rows), "Output size was not equal to input size!")
def testMultiKeyGetter(self):
parser = CommonLogFormat()
func = multikey_getter_gen(parser, keys=(1,2), is_indices=True)
fh = StringIO('\n'.join(self.clf_rows))
output = [func(l) for l in fh]
self.assertEqual(len(output), len(self.clf_rows), "Output size was not equal to input size!")
class FilterBotsTestCase(unittest.TestCase):
def setUp(self):
self.options = AttrDict({
"reverse": False,
"unescape": False,
"printlines": False,
"ip_ua_re": "^(?P<ip>.*?) - USER_AGENT:'(?P<ua>.*?)'",
"bots_ips": StringIO("\n".join([
"6.6.6.6"
]) + "\n"),
"bots_ua": StringIO("\n".join([
"## Example comment ##",
"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"ssearch_bot/Nutch-1.0 (sSearch Crawler; http://www.semantissimo.de)",
"r'.*crawler'",
"s'MSIECrawler)'",
"p'DotSpotsBot'",
"p'Java/'"
]) + "\n")
})
self.fh = StringIO(
"127.0.0.1 - USER_AGENT:'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)' - ...\n" \
"255.255.255.255 - USER_AGENT:'Mozilla' - ...\n" \
"1.1.1.1 - USER_AGENT:'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; MSIECrawler)'\n" \
"2.2.2.2 - USER_AGENT:'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Win 9x 4.90; .NET CLR 1.1.4322; MSIECrawler)'\n" \
"3.3.3.3 - USER_AGENT:'DotSpotsBot/0.2 (crawler; support at dotspots.com)'\n" \
"4.4.4.4 - USER_AGENT:'inagist.com url crawler'\n" \
"5.5.5.5 - USER_AGENT:'Java/1.6.0_18'\n" \
"6.6.6.6 - USER_AGENT:'ssearch_bot/Nutch-1.0 (sSearch Crawler; http://www.semantissimo.de)'\n"
)
self.json_fh = StringIO(
'''{"timestamp":"2010\/09\/01 00:00:01","user_agent":"Mozilla\/5.0 (compatible; Googlebot\/2.1; +http:\/\/www.google.com\/bot.html)","user_ip":"66.249.71.108"}\n''' \
'''{"timestamp":"2010\/10\/01 11:00:01","user_agent":"Mozilla\/5.0 (compatible; Googlebot\/2.1; +http:\/\/www.google.com\/bot.html)","user_ip":"66.249.71.109"}\n''' \
'''{"timestamp":"2010\/09\/01 00:00:01","user_agent":"Mozilla\/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.11) Gecko\/20100701 Firefox\/3.5.11 (.NET CLR 3.5.30729)","user_ip":"100.100.1.100"}\n''' \
'''{"timestamp":"2010\/10\/01 00:00:01","user_agent":"Mozilla\/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.11) Gecko\/20100701 Firefox\/3.5.11 (.NET CLR 3.5.30729)","user_ip":"6.6.6.6"}\n''' \
)
def testParserFiltering(self):
json_options = self.options
json_options['parser'] = 'JSONParser'
json_options['ip_ua_fields'] = 'ua:user_agent,ip:user_ip'
i=0
for l in filterbots(fh=self.json_fh, **json_options):
i+=1
self.assertEqual(i, 1, "filterbots output size different than expected: %s" % str(i))
def testRegExpFiltering(self):
i=0
for l in filterbots(fh=self.fh, **self.options):
i+=1
self.assertEqual(i, 1, "filterbots output size different than expected: %s" % str(i))
class GeoIPTestCase(unittest.TestCase):
def setUp(self):
self.options = AttrDict({ 'ip_re': '^(.*?) -' })
self.fh = StringIO(
"127.0.0.1 - USER_AGENT:'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)' - ...\n" \
"255.255.255.255 - USER_AGENT:'Mozilla' - ...\n" \
"74.125.225.48 - USER_AGENT:'IE' - ...\n" \
"65.55.175.254 - USER_AGENT:'IE' - ...\n"
)
def testGeoIP(self):
try:
import GeoIP
except ImportError:
print( "GeoIP Python package not available - skipping geoip unittest.",
file = sys.stderr)
return
output = [(geocode, ip, line) for geocode, ip, line in geoip(fh=self.fh, **self.options)]
self.assertEqual(len(output), 2, "Output size was different than expected: %s" % str(len(output)))
def testFilter(self):
"""Test GeoIP filtering functionality"""
try:
import GeoIP
except ImportError:
print ("GeoIP Python package not available - skipping geoip unittest.",
file = sys.stderr)
return
# Check positive filter
self.options['filter'] = 'United States'
output = [(geocode, ip, line) for geocode, ip, line in geoip(fh=self.fh, **self.options)]
self.assertEqual(len(output), 2, "Output size was different than expected: %s" % str(len(output)))
# Check negative filter
self.options['filter'] = 'India'
output = [(geocode, ip, line) for geocode, ip, line in geoip(fh=self.fh, **self.options)]
self.assertEqual(len(output), 0, "Output size was different than expected: %s" % str(len(output)))
class SamplingTestCase(unittest.TestCase):
def setUp(self):
self.options = AttrDict({ 'num_samples': 1 })
self.weighted_opts = AttrDict({
'num_samples': 5,
'field': 1,
'delimiter': ' '
})
self.fh = StringIO("\n".join([
'5 five', '1 one', '300 threehundred', '500 fivehundred',
'0 zero', '-1 minusone', '670 sixhundredseventy', '1000 thousand',
'22 twentytwo', '80 eighty', '3 three'
]))
def testUniformSampling(self):
output = [r for r in logsample(fh=self.fh, **self.options)]
self.assertEqual(len(output), self.options.num_samples,
"logsample output size different than expected: %s" % len(output))
def testWeightedSampling(self):
output = [(k, r) for k, r in logsample_weighted(fh=self.fh, **self.weighted_opts)]
self.assertEqual(len(output), self.weighted_opts.num_samples,
"logsample output size different than expected: %s" % len(output))
class FilterTestCase(unittest.TestCase):
"""Unit-test for the logfilter functionality"""
def setUp(self):
self.testset = StringIO("\n".join([
"AA word",
"word AA word",
"word AA",
"AA",
"aa word",
"wordAA",
"AAword",
"wordAAword",
"CC DD word"
])+"\n")
self.exp_emitted_wb = 4
self.exp_emitted = 1
self.blacklist = StringIO("\n".join([
'AA',
'bb',
'CC DD'
])+"\n")
def testACWB(self):
"""Aho-Corasick-based matching with Word Boundaries"""
lines = 0
for l in logfilter(self.testset, blacklist=self.blacklist, field=1, delimiter="\t",
with_acora=True, ignorecase=False,
word_boundaries=True):
#print(l)
lines += 1
self.assertEqual(lines, self.exp_emitted_wb, "Number of lines emitted was not as expected: %s (Expected: %s)" %
(lines, self.exp_emitted_wb))
def testAC(self):
"""Aho-Corasick-based matching"""
lines = 0
for l in logfilter(self.testset, blacklist=self.blacklist, field=1, delimiter="\t",
with_acora=True, ignorecase=False,
word_boundaries=False):
#print(l)
lines += 1
self.assertEqual(lines, self.exp_emitted, "Number of lines emitted was not as expected: %s (Expected: %s)" %
(lines, self.exp_emitted))
def testRE(self):
"""Regular Expression-based matching"""
lines = 0
for l in logfilter(self.testset, blacklist=self.blacklist, field=1, delimiter="\t",
with_acora=False, ignorecase=False,
word_boundaries=False):
#print( l)
lines += 1
self.assertEqual(lines, self.exp_emitted, "Number of lines emitted was not as expected: %s (Expected: %s)" %
(lines, self.exp_emitted))
def testREWB(self):
"""Regular Expression-based matching with Word Boundaries"""
lines = 0
for l in logfilter(self.testset, blacklist=self.blacklist, field=1, delimiter="\t",
with_acora=False, ignorecase=False,
word_boundaries=True):
#print( l)
lines += 1
self.assertEqual(lines, self.exp_emitted_wb, "Number of lines emitted was not as expected: %s (Expected: %s)" %
(lines, self.exp_emitted_wb))
class MergeTestCase(unittest.TestCase):
def setUp(self):
self.tempfiles = [mkstemp(), mkstemp(), mkstemp()]
self.args = [fname for fh, fname in self.tempfiles]
def tearDown(self):
"""Cleanup temporary files created by test"""
for fh, fname in self.tempfiles:
os.remove(fname)
def testNumericMerge(self):
t1 =['1 one', '5 five', '300 threehundred',
'500 fivehundred']
os.write(self.tempfiles[0][0], "\n".join(t1).encode())
t2 = ['-1 minusone', '0 zero',
'670 sixhundredseventy' ,'1000 thousand']
os.write(self.tempfiles[1][0], "\n".join(t2).encode())
t3= ['3 three', '22 twentytwo', '80 eighty']
os.write(self.tempfiles[2][0], "\n".join(t3).encode())
options = AttrDict({'delimiter': ' ', 'field': 1, 'numeric': True })
output = [(k, l) for k, l in logmerge(options, self.args)]
self.assertEqual(len(output), 11, "Output size was not equal to input size!")
self.assertEqual( list( map(itemgetter(0), output)),
sorted ( list( map( lambda x: int(x[0]), output))),
"Output was not numerically sorted!")
def testDateMerge(self):
t1 = ['2010/01/12 07:00:00,one', '2010/01/12 08:00:00,five',
'2010/01/13 10:00:00,threehundred']
os.write(self.tempfiles[0][0], "\n".join(t1).encode())
t2 =['2010/01/12 07:30:00,one', '2010/01/12 08:10:00,five',
'2010/01/12 21:00:00,threehundred']
os.write(self.tempfiles[1][0], "\n".join(t2).encode())
t3 = ['2010/01/11 05:33:03,one', '2010/01/12 03:10:00,five',
'2010/01/21 22:00:00,threehundred']
os.write(self.tempfiles[2][0], "\n".join(t3).encode())
dateformat = '%Y/%m/%d %H:%M:%S'
options = AttrDict({'delimiter': ',', 'field': 1, 'datetime': True, 'dateformat': dateformat })
output = [(k, l) for k, l in logmerge(options, self.args)]
self.assertEqual(len(output), 9, "Output size was not equal to input size!")
self.assertEqual( list( map(itemgetter(0), output)),
sorted( list( map(itemgetter(0), output))),
"Output was not time sorted!")
def testLexicalMerge(self):
t1 = ['1 one', '300 threehundred', '5 five',
'500 fivehundred']
os.write(self.tempfiles[0][0], "\n".join(t1).encode())
t2 = ['-1 minusone', '0 zero', '1000 thousand',
'670 sixhundredseventy']
os.write(self.tempfiles[1][0], "\n".join(t2).encode())
t3 = ['22 twentytwo', '3 three',
'80 eighty']
os.write(self.tempfiles[2][0], "\n".join(t3).encode())
options = AttrDict({ 'delimiter': ' ', 'field': 1, 'numeric': False })
output = [(k, l) for k, l in logmerge(options, self.args)]
self.assertEqual(len(output), 11, "Output size was not equal to input size!")
self.assertEqual( list( map(itemgetter(0), output)),
sorted( list( map(itemgetter(0), output))),
"Output was not lexically sorted!")
#
# QPS: Queries Per Second
#
class QPSTestCase(unittest.TestCase):
def setUp(self):
self.options = AttrDict({
"ignore": True,
"dt_re": r'^\[(.*?)\]',
"dateformat": "%d/%b/%Y:%H:%M:%S -0700",
"window_size": 15
})
self.fh = StringIO(
'[10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" \n' \
'[10/Oct/2000:13:55:38 -0700] "GET /apache_pb.gif HTTP/1.0" \n' \
'[10/Oct/2000:13:56:59 -0700] "GET /apache_pb.gif HTTP/1.0" \n' \
'[10/Oct/2000:13:57:01 -0700] "GET /apache_pb.gif HTTP/1.0" \n' \
'[11/Oct/2000:14:01:00 -0700] "GET /apache_pb.gif HTTP/1.0" \n' \
'[11/Oct/2000:14:01:13 -0700] "GET /apache_pb.gif HTTP/1.0" \n' \
'[11/Oct/2000:14:01:14 -0700] "GET /apache_pb.gif HTTP/1.0" \n'
)
def testQps(self):
blocks=0
qs=[]
qpsVal = list( qps(fh=self.fh, **self.options))
sys.stderr.write(f"In testQps, qpsVal ({type(qpsVal)}):\t{qpsVal}\n")
for q in qpsVal:
blocks+=1
qs.append(q)
self.assertEqual(blocks, 3,
"qps output size different than expected: %s" % str(blocks))
class PlotTestCase(unittest.TestCase):
def setUp(self):
self.fh = StringIO("\n".join([
'5 five', '1 one', '300 threehundred', '500 fivehundred',
'0 zero', '-1 minusone', '670 sixhundredseventy', '1000 thousand',
'22 twentytwo', '80 eighty', '3 three'
]))
def testGChart(self):
try:
import pygooglechart
except ImportError:
print( "pygooglechart Python package not available - skipping logplot gchart unittest.",
file = sys.stderr)
return
options = AttrDict({
'backend': 'gchart',
'output': False,
'limit': 10,
'field': 1,
'delimiter': ' ',
'legend': True,
'width': 600,
'height': 300
})
chart = None
for plot_type in ('pie', 'line'):
self.fh.seek(0)
options['type'] = plot_type
chart = logplot(options, None, self.fh)
self.assertNotEqual(chart, None, "logplot returned None. Expected a Plot object")
# Should raise ValueError here due to fh being at EOF
self.assertRaises(ValueError, logplot, options, None, self.fh)
tmp_fh, tmp_fname = mkstemp()
chart.download(tmp_fname)
os.remove(tmp_fname)
class SumstatTestCase(unittest.TestCase):
def setUp(self):
self.data = StringIO('\n'.join([
'500 val1',
'440 val2',
'320 val3',
'85 val4',
'13 val5'
]))
self.avg = 271.6
self.N = 1358
self.M = 5
def testSumstat(self):
stat = sumstat(fh=self.data, delimiter=' ', reverse=True)
self.assertEqual(stat['M'], self.M)
self.assertEqual(stat['N'], self.N)
self.assertEqual(stat['avg'], self.avg)
if __name__ == "__main__":
unittest.main()
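# Usage note (illustrative; the module filename is an assumption): single suites can
# also be run through the unittest CLI, e.g.
#   python -m unittest test_logtools.ParsingTestCase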
|
py | 1a36a9f7d9bc4e9c873f31763a21aeb0cd62c468 | from model.contact import Contact
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def open_contacts_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("index.php") and len(wd.find_elements_by_link_text("Last name")) > 0
and len(wd.find_elements_by_link_text("All phones")) > 0):
wd.find_element_by_link_text("home").click()
def fill_form(self, contact):
# fill in form
self.change_field_value("firstname", contact.first_name)
self.change_field_value("middlename", contact.middle_name)
self.change_field_value("lastname", contact.last_name)
self.change_field_value("nickname", contact.nick_name)
self.change_field_value("address", contact.address)
self.change_field_value("email", contact.email)
self.change_field_value("email2", contact.email2)
self.change_field_value("email3", contact.email3)
self.change_field_value("home", contact.home_phone)
self.change_field_value("work", contact.work_phone)
self.change_field_value("mobile", contact.mobile_phone)
self.change_field_value("fax", contact.fax_phone)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def create(self, contact):
wd = self.app.wd
self.open_contacts_page()
# click on add new
wd.find_element_by_link_text("add new").click()
self.fill_form(contact)
# submit form
wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
self.contact_cache = None
def delete_contact_by_index(self, index):
wd = self.app.wd
self.open_contacts_page()
self.select_contact_by_index(index)
wd.find_element_by_xpath("//input[@value='Delete']").click()
# confirm deletion
wd.switch_to_alert().accept()
self.open_contacts_page()
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.open_contacts_page()
self.select_contact_by_id(id)
wd.find_element_by_xpath("//input[@value='Delete']").click()
# confirm deletion
wd.switch_to_alert().accept()
self.open_contacts_page()
self.contact_cache = None
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def delete_first_contact(self):
self.delete_contact_by_index(0)
def edit_contact_by_index(self, index, contact):
wd = self.app.wd
self.open_contacts_page()
self.open_contact_to_edit_by_index(index)
self.fill_form(contact)
wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
self.open_contacts_page()
self.contact_cache = None
def edit_contact_by_id(self, id, contact):
wd = self.app.wd
self.open_contacts_page()
self.open_contact_to_edit_by_id(id)
self.fill_form(contact)
wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
self.open_contacts_page()
self.contact_cache = None
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_xpath("//img[@alt='Edit']")[index].click()
def open_contact_to_edit_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("a[href='edit.php?id=%s']" % id).click()
def edit_first_contact(self, contact):
wd = self.app.wd
self.open_contacts_page()
# select first contact
self.open_contact_to_edit_by_index(0)
self.fill_form(contact)
wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
self.open_contacts_page()
self.contact_cache = None
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.open_contacts_page()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
last_name = cells[1].text
first_name = cells[2].text
id = row.find_element_by_name("selected[]").get_attribute("value")
all_phones = cells[5].text
address = cells[3].text
all_emails = cells[4].text
self.contact_cache.append(Contact(first_name=first_name, last_name=last_name, contact_id=id,
all_phones_from_home_page=all_phones, address=address,
all_emails_from_home_page=all_emails))
return list(self.contact_cache)
def get_simple_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.open_contacts_page()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
last_name = cells[1].text
first_name = cells[2].text
id = row.find_element_by_name("selected[]").get_attribute("value")
self.contact_cache.append(Contact(first_name=first_name, last_name=last_name, contact_id=id))
return list(self.contact_cache)
def count(self):
wd = self.app.wd
self.open_contacts_page()
return len(wd.find_elements_by_xpath("//img[@alt='Edit']"))
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.open_contacts_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contacts_page()
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
middlename = wd.find_element_by_name("middlename").get_attribute("value")
nickname = wd.find_element_by_name("nickname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
homephone = wd.find_element_by_name("home").get_attribute("value")
workphone = wd.find_element_by_name("work").get_attribute("value")
mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
faxphone = wd.find_element_by_name("fax").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
return Contact(first_name=firstname, last_name=lastname, contact_id=id, home_phone=homephone, mobile_phone=mobilephone,
work_phone=workphone, fax_phone=faxphone, middle_name=middlename, nick_name=nickname,
email=email, email2=email2, email3=email3, address=address)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
mobilephone = re.search("M: (.*)", text).group(1)
faxphone = re.search("F: (.*)", text).group(1)
return Contact(home_phone=homephone, mobile_phone=mobilephone, work_phone=workphone, fax_phone=faxphone)
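# Illustrative usage sketch (not part of the original module): assumes an `app`
# fixture exposing a Selenium WebDriver as `app.wd`, which every method above relies on.
def _example_usage(app):
    helper = ContactHelper(app)
    helper.create(Contact(first_name="Jane", last_name="Doe", email="jane@example.com"))
    return helper.count()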
|
py | 1a36aa7102f4ab3bbc9ebac003e24804496595a9 | from VorDiff.nodes.scalar import Scalar
from VorDiff.nodes.vector import Vector, Element
import numpy as np
class AutoDiff():
'''
The AutoDiff class allows users to define Scalar variables and
interface with the auto-differentiator.
'''
@staticmethod
def scalar(val):
'''
Creates a Scalar object with the value given and derivative 1
INPUTS
=======
val: The numeric value at which to evaluate
RETURNS
=======
Scalar objects
'''
return Scalar(val, 1)
@staticmethod
def element(val, jacob):
'''
Creates an Element object with the value given and jacobian matrix
INPUTS
=======
val: The numeric value of the function
jacob: The jacobian matrix value of the function at which to evaluate
RETURNS
=======
Element objects
'''
return Element(val,jacob)
@staticmethod
def vector(vec):
'''
Creates a Vector object with the vector given and the jacobian matrix
INPUTS
=======
val: The numeric values at which to evaluate
RETURNS
=======
Vector objects
'''
return Vector(vec, np.eye(len(vec)))
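# Illustrative usage sketch (assumes the Scalar node overloads the arithmetic
# operators and exposes value/derivative accessors, which live in VorDiff.nodes
# rather than in this file):
#
#   x = AutoDiff.scalar(3.0)                     # value 3.0, seed derivative 1
#   y = x * x                                    # hypothetical: d(x*x)/dx evaluates to 6.0
#   v = AutoDiff.vector(np.array([1.0, 2.0]))    # jacobian seeded with np.eye(2)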
|
py | 1a36aaeaf5c122d0a0f0ae723047a8aefdb3767a | from abc import ABC, abstractmethod
class Database(ABC):
def __init__(self, connection_string):
self.connection_string = connection_string
@abstractmethod
def get_database(self):
pass
@abstractmethod
def insert_to_database(self, item):
pass
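# Illustrative sketch (not part of the original module): a minimal concrete
# implementation, mainly to show that both abstract methods must be overridden
# before the class can be instantiated.
class InMemoryDatabase(Database):
    def get_database(self):
        return getattr(self, "_items", [])

    def insert_to_database(self, item):
        self._items = self.get_database() + [item]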
|
py | 1a36ac29e55918cbb9b571facb07654c4700fb4e | #-*-coding:utf-8-*-
import torch.utils.data
from data.base_data_loader import BaseDataLoader
def CreateDataset(opt):
dataset = None
if opt.dataset_mode == 'aligned':
from data.aligned_dataset import AlignedDataset
dataset = AlignedDataset()
elif opt.dataset_mode == 'single':
from data.single_dataset import SingleDataset
dataset = SingleDataset()
else:
raise ValueError("Dataset [%s] not recognized." % opt.dataset_mode)
print("dataset [%s] was created" % (dataset.name()))
dataset.initialize(opt)
return dataset
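# Illustrative usage sketch (assumes an `opt` options namespace produced by the
# project's option parser, which is not defined in this file):
#
#   data_loader = CustomDatasetDataLoader()
#   data_loader.initialize(opt)
#   for i, data in enumerate(data_loader.load_data()):
#       ...  # each `data` is one batch yielded by the loader's __iter__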
class CustomDatasetDataLoader(BaseDataLoader):
def name(self):
return 'CustomDatasetDataLoader'
def initialize(self, opt):
BaseDataLoader.initialize(self, opt)
self.dataset = CreateDataset(opt)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads))
def load_data(self):
return self
def __len__(self):
return min(len(self.dataset), self.opt.max_dataset_size)
def __iter__(self):
for i, data in enumerate(self.dataloader):
if i >= self.opt.max_dataset_size:
break
yield data |
py | 1a36ac7e0d60016c288fe3fff54aa4c94a0050e3 | import math
import os
import random
import time
import albumentations as A
import cv2
import numpy as np
import pandas as pd
import tifffile
import torch
from torch.utils.data import Dataset
# VV mean: -15.830463789539426
# VV std: 6.510123043441801
# VH mean: -24.66130160959856
# VH std: 6.684547156770566
def normalize_band(band, ignored_mask=0):
band[band < -32760] = -100
ignored_idx = band == -100
if np.count_nonzero(band != -100) == 0:
band[:, :] = ignored_mask
else:
band = (band + 40) / 15
band[ignored_idx] = ignored_mask
return band
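# Worked example (derived from the constants above, not original commentary): a valid
# backscatter value of -25 dB maps to (-25 + 40) / 15 = 1.0, whereas nodata values
# below -32760 are forced to -100 and then replaced by `ignored_mask`.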
train_transforms = A.Compose([
#A.Rotate(limit=30, border_mode=cv2.BORDER_CONSTANT, p=0.3),
# A.HorizontalFlip(),
# A.VerticalFlip()
], additional_targets={
'conf_mask': 'mask',
'length_mask': 'mask',
'vessel_mask': 'mask',
'fishing_mask': 'mask',
'center_mask': 'mask'})
class XviewValDataset(Dataset):
def __init__(
self,
mode: str,
dataset_dir: str,
annotation_csv: str,
folds_csv: str,
multiplier: int = 1,
fold: int = 0,
crop_size: int = 1024,
sigma: int = 2,
radius: int = 4,
transforms: A.Compose = train_transforms
):
df = pd.read_csv(folds_csv)
self.radius = radius
if mode == "train":
self.names = df[df.fold != fold].scene_id.tolist()
else:
self.names = df[df.fold == fold].scene_id.tolist()
self.mode = mode
self.dataset_dir = dataset_dir
self.transforms = transforms
self.df = pd.read_csv(annotation_csv)
self.crop_size = crop_size
self.sigma = sigma
self.names = multiplier * self.names
if self.mode == "train":
random.shuffle(self.names)
def __getitem__(self, i):
if self.mode == "val":
return {
"name": self.names[i],
}
rm = random.Random()
rm.seed(time.time_ns())
name = self.names[i]
crop_size = self.crop_size
vv_full = tifffile.memmap(os.path.join(self.dataset_dir, "validation", name, "VV_dB.tif"), mode="r")
vh_full = tifffile.memmap(os.path.join(self.dataset_dir, "validation", name, "VH_dB.tif"), mode="r")
h, w = vv_full.shape
df = self.df
df = df[df.scene_id == name]
points = [row for _, row in df.iterrows()]
if len(points) > 1 and random.random() > 0.5:
point_idx = rm.randint(0, len(points) - 1)
point = points[point_idx]
y, x = point.detect_scene_row, point.detect_scene_column
max_shift_pad = 32
min_x_start = min(max(x - crop_size + max_shift_pad, 0), w - crop_size - 32)
min_y_start = min(max(y - crop_size + max_shift_pad, 0), h - crop_size - 32)
max_x_start = max(min(x - max_shift_pad, w - crop_size - 1), 0)
max_y_start = max(min(y - max_shift_pad, h - crop_size - 1), 0)
if max_x_start < min_x_start:
min_x_start, max_x_start = max_x_start, min_x_start
if max_y_start < min_y_start:
min_y_start, max_y_start = max_y_start, min_y_start
h_start = rm.randint(int(min_y_start), int(max_y_start))
w_start = rm.randint(int(min_x_start), int(max_x_start))
h_end = h_start + crop_size
w_end = w_start + crop_size
vh = vh_full[h_start: h_end, w_start: w_end].astype(np.float32)
vv = vv_full[h_start: h_end, w_start: w_end].astype(np.float32)
else:
for i in range(5):
h_start = rm.randint(0, h - crop_size - 1)
w_start = rm.randint(0, w - crop_size - 1)
h_end = h_start + crop_size
w_end = w_start + crop_size
vh = vh_full[h_start: h_end, w_start: w_end].astype(np.float32)
known_pixels = np.count_nonzero(vh > -1000)
vv = vv_full[h_start: h_end, w_start: w_end].astype(np.float32)
if known_pixels / (crop_size * crop_size) > 0.05:
break
object_mask = np.zeros_like(vv, dtype=np.float32)
vessel_mask = np.zeros_like(vv, dtype=np.float32)
fishing_mask = np.zeros_like(vv, dtype=np.float32)
conf_mask = np.zeros_like(vv, dtype=np.float32)
length_mask = np.zeros_like(vv)
length_mask[:, :] = -1
center_mask = np.zeros_like(vv)
size = 6 * self.sigma + 3
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0, y0 = 3 * self.sigma + 1, 3 * self.sigma + 1
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2))
crop_coords = np.zeros((1024, 4))
crop_coords_idx = 0
for _, row in df.iterrows():
if h_start < row.detect_scene_row < h_end and w_start < row.detect_scene_column < w_end:
x = row.detect_scene_column - w_start
y = row.detect_scene_row - h_start
# CENTER MASK
# upper left
ul = int(np.round(x - 3 * self.sigma - 1)), int(np.round(y - 3 * self.sigma - 1))
# bottom right
br = int(np.round(x + 3 * self.sigma + 2)), int(np.round(y + 3 * self.sigma + 2))
c, d = max(0, -ul[0]), min(br[0], self.crop_size) - ul[0]
a, b = max(0, -ul[1]), min(br[1], self.crop_size) - ul[1]
cc, dd = max(0, ul[0]), min(br[0], self.crop_size)
aa, bb = max(0, ul[1]), min(br[1], self.crop_size)
center_mask[aa:bb, cc:dd] = np.maximum(
center_mask[aa:bb, cc:dd], g[a:b, c:d])
# DEFINE VESSELS
# man-made maritime object
object_cls = 1
vessel_cls = 0
fishing_cls = 0
if math.isnan(row.is_vessel):
vessel_cls = 255
elif row.is_vessel:
vessel_cls = 1
if vessel_cls == 0:
fishing_cls = 0
elif math.isnan(row.is_fishing):
fishing_cls = 255
elif row.is_fishing:
fishing_cls = 1
confs = ['none', 'LOW', 'MEDIUM', 'HIGH']
conf_idx = confs.index(row.confidence)
if conf_idx > 1:
conf_idx = 2
cv2.circle(conf_mask, center=(x, y), radius=self.radius, color=conf_idx, thickness=-1)
cv2.circle(object_mask, center=(x, y), radius=self.radius if object_cls < 200 else 7, color=object_cls,
thickness=-1)
cv2.circle(vessel_mask, center=(x, y), radius=self.radius if vessel_cls < 200 else 7, color=vessel_cls,
thickness=-1)
cv2.circle(fishing_mask, center=(x, y), radius=self.radius if fishing_cls < 200 else 7,
color=fishing_cls,
thickness=-1)
# length MASK
vessel_length = -1
if not math.isnan(row.vessel_length_m):
vessel_length = row.vessel_length_m
cv2.circle(length_mask, center=(x, y), radius=self.radius if vessel_length > 0 else 7,
color=vessel_length,
thickness=-1)
if conf_idx > 1:
pad = 9
y1, y2 = y - pad, y + pad
x1, x2 = x - pad, x + pad
if x1 > 32 and x2 < self.crop_size - 32 and y1 > 32 and y2 < self.crop_size - 32:
crop_coords[crop_coords_idx] = np.array([x1, y1, x2, y2])
crop_coords_idx += 1
vv = normalize_band(band=vv, ignored_mask=0)
vh = normalize_band(band=vh, ignored_mask=0)
image = np.stack([vv, vh], axis=-1).astype(np.float32)
sample = self.transforms(image=image, mask=object_mask, center_mask=center_mask, length_mask=length_mask,
conf_mask=conf_mask, fishing_mask=fishing_mask, vessel_mask=vessel_mask)
image = sample["image"]
object_mask = sample["mask"]
center_mask = sample["center_mask"]
length_mask = sample["length_mask"]
vessel_mask = sample["vessel_mask"]
fishing_mask = sample["fishing_mask"]
conf_mask = sample["conf_mask"]
image = torch.from_numpy(image).float().moveaxis(-1, 0)
center_mask = torch.from_numpy(center_mask).float().unsqueeze(0) * 255
length_mask = torch.from_numpy(length_mask).float().unsqueeze(0)
conf_mask = torch.from_numpy(conf_mask).long()
object_mask = torch.from_numpy(object_mask).float().unsqueeze(0)
vessel_mask = torch.from_numpy(vessel_mask).float().unsqueeze(0)
fishing_mask = torch.from_numpy(fishing_mask).float().unsqueeze(0)
if random.random() < 0.5:
# 180 rotate to handle different sar orientation
image = torch.rot90(image, 2, dims=(1, 2))
center_mask = torch.rot90(center_mask, 2, dims=(1, 2))
length_mask = torch.rot90(length_mask, 2, dims=(1, 2))
conf_mask = torch.rot90(conf_mask, 2, dims=(0, 1))
object_mask = torch.rot90(object_mask, 2, dims=(1, 2))
vessel_mask = torch.rot90(vessel_mask, 2, dims=(1, 2))
fishing_mask = torch.rot90(fishing_mask, 2, dims=(1, 2))
ori_crops = crop_coords.copy()
crop_coords = self.crop_size - crop_coords
crop_coords[ori_crops == 0] = 0
crop_coords = crop_coords[:, [2, 3, 0, 1]]
crop_coords = torch.from_numpy(crop_coords).long()
return {
"image": image,
"object_mask": object_mask,
"crop_coords": crop_coords,
"conf_mask": conf_mask,
"vessel_mask": vessel_mask,
"fishing_mask": fishing_mask,
"center_mask": center_mask,
"length_mask": length_mask,
"name": name,
}
def __len__(self):
return len(self.names)
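# Illustrative usage sketch (the directory and CSV paths are placeholders, not from
# the original source); wrapped in a helper so the module stays import-safe.
def _example_loader():
    ds = XviewValDataset(mode="train", dataset_dir="/data/xview3",
                         annotation_csv="train.csv", folds_csv="folds.csv")
    return torch.utils.data.DataLoader(ds, batch_size=2, shuffle=True, num_workers=4)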
|
py | 1a36aca1dd2f130d1c7d1fdf07904874df55d44b | # -*- coding: utf-8 -*-
# Copyright(C) 2017 Phyks (Lucas Verney)
#
# This file is part of a woob module.
#
# This woob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This woob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this woob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import datetime
from woob.browser.pages import JsonPage, HTMLPage, pagination
from woob.browser.filters.standard import (
CleanDecimal, CleanText, Currency, Date, Env, Format, Regexp, RegexpError
)
from woob.browser.filters.html import AbsoluteLink, Attr, Link, XPathNotFound
from woob.browser.elements import ItemElement, ListElement, method
from woob.capabilities.base import NotAvailable, NotLoaded
from woob.capabilities.housing import (
City, Housing, HousingPhoto,
UTILITIES, ENERGY_CLASS, POSTS_TYPES, ADVERT_TYPES
)
from woob.tools.capabilities.housing.housing import PricePerMeterFilter
from .constants import AVAILABLE_TYPES, QUERY_TYPES, QUERY_HOUSE_TYPES
class CitiesPage(JsonPage):
def iter_cities(self):
cities_list = self.doc
if isinstance(self.doc, dict):
cities_list = self.doc.values()
for city in cities_list:
city_obj = City()
city_obj.id = city
city_obj.name = city
yield city_obj
class HousingPage(HTMLPage):
@method
class get_housing(ItemElement):
klass = Housing
obj_id = Format(
'%s:%s',
Env('type'),
Attr('//div[boolean(@data-property-reference)]', 'data-property-reference')
)
obj_advert_type = ADVERT_TYPES.PROFESSIONAL
def obj_type(self):
type = Env('type')(self)
if type == 'location':
if 'appartement-meuble' in self.page.url:
return POSTS_TYPES.FURNISHED_RENT
else:
return POSTS_TYPES.RENT
elif type == 'achat':
return POSTS_TYPES.SALE
else:
return NotAvailable
def obj_url(self):
return self.page.url
def obj_house_type(self):
url = self.obj_url()
for house_type, types in QUERY_HOUSE_TYPES.items():
for type in types:
if ('/%s/' % type) in url:
return house_type
return NotAvailable
obj_title = CleanText('//h1[has-class("OfferTop-title")]')
obj_area = CleanDecimal(
Regexp(
CleanText(
'//div[has-class("MiniData")]//p[has-class("MiniData-item")][1]'
),
r'(\d*\.*\d*) .*',
default=NotAvailable
),
default=NotAvailable
)
obj_cost = CleanDecimal(
'//span[has-class("OfferTop-price")]',
default=NotAvailable
)
obj_price_per_meter = PricePerMeterFilter()
obj_currency = Currency(
'//span[has-class("OfferTop-price")]'
)
obj_location = Format(
'%s - %s',
CleanText('//p[@data-behat="adresseBien"]'),
CleanText('//p[has-class("OfferTop-loc")]')
)
obj_text = CleanText('//div[has-class("OfferDetails-content")]/p[1]')
obj_phone = Regexp(
Link(
'//a[has-class("OfferContact-btn--tel")]'
),
r'tel:(.*)'
)
def obj_photos(self):
photos = []
for photo in self.xpath('//div[has-class("OfferSlider")]//img'):
photo_url = Attr('.', 'src')(photo)
photo_url = photo_url.replace('640/480', '800/600')
photos.append(HousingPhoto(photo_url))
return photos
obj_date = datetime.date.today()
def obj_utilities(self):
price = CleanText(
'//p[has-class("OfferTop-price")]'
)(self)
if "charges comprises" in price.lower():
return UTILITIES.INCLUDED
else:
return UTILITIES.EXCLUDED
obj_rooms = CleanDecimal(
'//div[has-class("MiniData")]//p[has-class("MiniData-item")][2]',
default=NotAvailable
)
obj_bedrooms = CleanDecimal(
'//div[has-class("MiniData")]//p[has-class("MiniData-item")][3]',
default=NotAvailable
)
def obj_DPE(self):
try:
electric_consumption = CleanDecimal(Regexp(
Attr('//div[has-class("OfferDetails-content")]//img', 'src'),
r'https://dpe.foncia.net\/(\d+)\/.*'
))(self)
except (RegexpError, XPathNotFound):
electric_consumption = None
DPE = ""
if electric_consumption is not None:
if electric_consumption <= 50:
DPE = "A"
elif 50 < electric_consumption <= 90:
DPE = "B"
elif 90 < electric_consumption <= 150:
DPE = "C"
elif 150 < electric_consumption <= 230:
DPE = "D"
elif 230 < electric_consumption <= 330:
DPE = "E"
elif 330 < electric_consumption <= 450:
DPE = "F"
else:
DPE = "G"
return getattr(ENERGY_CLASS, DPE, NotAvailable)
return NotAvailable
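# Worked example (derived from the thresholds above, not original commentary): an
# estimated consumption of 120 kWhEP/m².an falls in the 90-150 band, so obj_DPE
# resolves to ENERGY_CLASS.C (assuming the enum exposes single-letter members, as
# the getattr call above implies).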
def obj_details(self):
details = {}
dispo = Date(
Regexp(
CleanText('//p[has-class("OfferTop-dispo")]'),
r'.* (\d\d\/\d\d\/\d\d\d\d)',
default=datetime.date.today().isoformat()
)
)(self)
if dispo is not None:
details["dispo"] = dispo
priceMentions = CleanText(
'//p[has-class("OfferTop-mentions")]',
default=None
)(self)
if priceMentions is not None:
details["priceMentions"] = priceMentions
agency = CleanText(
'//p[has-class("OfferContact-address")]',
default=None
)(self)
if agency is not None:
details["agency"] = agency
for item in self.xpath('//div[has-class("OfferDetails-columnize")]/div'):
category = CleanText(
'./h3[has-class("OfferDetails-title--2")]',
default=None
)(item)
if not category:
continue
details[category] = {}
for detail_item in item.xpath('.//ul[has-class("List--data")]/li'):
detail_title = CleanText('.//span[has-class("List-data")]')(detail_item)
detail_value = CleanText('.//*[has-class("List-value")]')(detail_item)
details[category][detail_title] = detail_value
for detail_item in item.xpath('.//ul[has-class("List--bullet")]/li'):
detail_title = CleanText('.')(detail_item)
details[category][detail_title] = True
try:
electric_consumption = CleanDecimal(Regexp(
Attr('//div[has-class("OfferDetails-content")]//img', 'src'),
r'https://dpe.foncia.net\/(\d+)\/.*'
))(self)
details["electric_consumption"] = (
'{} kWhEP/m².an'.format(electric_consumption)
)
except (RegexpError, XPathNotFound):
pass
return details
class SearchPage(HTMLPage):
def do_search(self, query, cities):
form = self.get_form('//form[@name="searchForm"]')
form['searchForm[type]'] = QUERY_TYPES.get(query.type, None)
form['searchForm[localisation]'] = cities
form['searchForm[type_bien][]'] = []
for house_type in query.house_types:
try:
form['searchForm[type_bien][]'].extend(
QUERY_HOUSE_TYPES[house_type]
)
except KeyError:
pass
form['searchForm[type_bien][]'] = [
x for x in form['searchForm[type_bien][]']
if x in AVAILABLE_TYPES.get(query.type, [])
]
if query.area_min:
form['searchForm[surface_min]'] = query.area_min
if query.area_max:
form['searchForm[surface_max]'] = query.area_max
if query.cost_min:
form['searchForm[prix_min]'] = query.cost_min
if query.cost_max:
form['searchForm[prix_max]'] = query.cost_max
if query.nb_rooms:
form['searchForm[pieces]'] = [i for i in range(1, query.nb_rooms + 1)]
form.submit()
def find_housing(self, query_type, housing):
form = self.get_form('//form[@name="searchForm"]')
form['searchForm[type]'] = query_type
form['searchForm[reference]'] = housing
form.submit()
class SearchResultsPage(HTMLPage):
@pagination
@method
class iter_housings(ListElement):
item_xpath = '//article[has-class("TeaserOffer")]'
next_page = Link('//div[has-class("Pagination--more")]/a[contains(text(), "Suivant")]')
class item(ItemElement):
klass = Housing
obj_id = Format(
'%s:%s',
Env('type'),
Attr('.//span[boolean(@data-reference)]', 'data-reference')
)
obj_url = AbsoluteLink('.//h3[has-class("TeaserOffer-title")]/a')
obj_type = Env('query_type')
obj_advert_type = ADVERT_TYPES.PROFESSIONAL
def obj_house_type(self):
url = self.obj_url(self)
for house_type, types in QUERY_HOUSE_TYPES.items():
for type in types:
if ('/%s/' % type) in url:
return house_type
return NotLoaded
obj_url = AbsoluteLink('.//h3[has-class("TeaserOffer-title")]/a')
obj_title = CleanText('.//h3[has-class("TeaserOffer-title")]')
obj_area = CleanDecimal(
Regexp(
CleanText(
'.//div[has-class("MiniData")]//p[@data-behat="surfaceDesBiens"]'
),
r'(\d*\.*\d*) .*',
default=NotAvailable
),
default=NotAvailable
)
obj_cost = CleanDecimal(
'.//strong[has-class("TeaserOffer-price-num")]',
default=NotAvailable
)
obj_price_per_meter = PricePerMeterFilter()
obj_currency = Currency(
'.//strong[has-class("TeaserOffer-price-num")]'
)
obj_location = CleanText('.//p[has-class("TeaserOffer-loc")]')
obj_text = CleanText('.//p[has-class("TeaserOffer-description")]')
def obj_photos(self):
url = CleanText(Attr('.//a[has-class("TeaserOffer-ill")]/img', 'src'))(self)
# If the used photo is a default no photo, the src is on the same domain.
if url[0] == '/':
return []
else:
return [HousingPhoto(url)]
obj_date = datetime.date.today()
def obj_utilities(self):
price = CleanText(
'.//strong[has-class("TeaserOffer-price-num")]'
)(self)
if "charges comprises" in price.lower():
return UTILITIES.INCLUDED
else:
return UTILITIES.EXCLUDED
obj_rooms = CleanDecimal(
'.//div[has-class("MiniData")]//p[@data-behat="nbPiecesDesBiens"]',
default=NotLoaded
)
obj_bedrooms = CleanDecimal(
'.//div[has-class("MiniData")]//p[@data-behat="nbChambresDesBiens"]',
default=NotLoaded
)
def obj_details(self):
return {
"dispo": Date(
Attr('.//span[boolean(@data-dispo)]', 'data-dispo',
default=datetime.date.today().isoformat())
)(self),
"priceMentions": CleanText('.//span[has-class("TeaserOffer-price-mentions")]')(self)
}
|
py | 1a36af0683437e0c78fb728aac0aa708c9a3c434 | """
This file contains an implementation of U-Net based on the paper
"U-Net: Convolutional Network for Biomedical Image Segmentation"
(https://arxiv.org/abs/1505.04597).
"""
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose
def UNet(input_shape=(512,512,4)):
inputs = Input(input_shape)
conv1 = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
conv1 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool1)
conv2 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool2)
conv3 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool3)
conv4 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Conv2D(1024, (3, 3), activation='relu', padding='same')(pool4)
conv5 = Conv2D(1024, (3, 3), activation='relu', padding='same')(conv5)
up6 = concatenate([Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
conv6 = Conv2D(512, (3, 3), activation='relu', padding='same')(up6)
conv6 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv6)
up7 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
conv7 = Conv2D(256, (3, 3), activation='relu', padding='same')(up7)
conv7 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv7)
up8 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
conv8 = Conv2D(128, (3, 3), activation='relu', padding='same')(up8)
conv8 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv8)
up9 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
conv9 = Conv2D(64, (3, 3), activation='relu', padding='same')(up9)
conv9 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv9)
conv10 = Conv2D(9, (1, 1), activation='softmax')(conv9)
model = Model(inputs=[inputs], outputs=[conv10])
return model
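# Illustrative usage sketch (optimizer, loss and metrics are assumptions, not from the
# original source); wrapped in a helper so importing this module has no side effects.
def _example_build():
    model = UNet(input_shape=(512, 512, 4))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model  # final layer is a per-pixel 9-class softmax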
|
py | 1a36af511b84e2f0ce6771d2048c8ff502681e62 | from typing import Optional
import xml.etree.ElementTree as ET
from ...xml.XmlReader import XmlReader as XR
from ..namespaces import API
from ..dto.InvoiceChainDigestResult import InvoiceChainDigestResult
from .deserialize_invoice_chain_element import deserialize_invoice_chain_element
def deserialize_invoice_chain_digest_result(element: ET.Element) -> Optional[InvoiceChainDigestResult]:
if element is None:
return None
result = InvoiceChainDigestResult(
current_page=XR.get_child_int(element, 'currentPage', API),
available_page=XR.get_child_int(element, 'availablePage', API),
invoice_chain_element=[deserialize_invoice_chain_element(e) for e in XR.find_all_child(element, 'invoiceChainElement', API)],
)
return result
|
py | 1a36af6c6d48f6f8f59be2b119c8d75078afa868 | from nose.tools import eq_, ok_
import wtforms
from flask import Flask
from flask_superadmin import Admin
from flask_superadmin.model import base
import flask_wtf as wtf
class Model(object):
def __init__(self, id=None, c1=1, c2=2, c3=3):
self.id = id
self.col1 = c1
self.col2 = c2
self.col3 = c3
DoesNotExist = "dummy"
class Form(wtf.Form):
col1 = wtforms.TextField()
col2 = wtforms.TextField()
col3 = wtforms.TextField()
class MockModelView(base.BaseModelAdmin):
fields = ("col1", "col2", "col3")
def __init__(
self, model, name=None, category=None, endpoint=None, url=None, **kwargs
):
# Allow to set any attributes from parameters
for k, v in list(kwargs.items()):
setattr(self, k, v)
super(MockModelView, self).__init__(model, name, category, endpoint, url)
self.created_models = []
self.updated_models = []
self.deleted_models = []
self.search_arguments = []
self.all_models = {1: Model(1), 2: Model(2)}
self.last_id = 3
# Scaffolding
def get_pk(self, instance):
return instance.id
def get_object(self, pk):
return self.all_models.get(int(pk))
def get_objects(self, *pks):
ret = []
for pk in pks:
ret.append(self.all_models.get(int(pk)))
return ret
def get_model_form(self):
def fake_model_form(*args, **kwargs):
return Form
return fake_model_form
def get_converter(self):
pass
def scaffold_list_columns(self):
columns = ["col1", "col2", "col3"]
if self.excluded_list_columns:
return [x for x in columns if x not in self.excluded_list_columns]
return columns
def init_search(self):
return bool(self.searchable_columns)
def scaffold_sortable_columns(self):
return ["col1", "col2", "col3"]
def scaffold_form(self):
return Form
# Data
def get_list(self, page, sort, sort_desc, search_query, **kwargs):
self.search_arguments.append((page, sort, sort_desc, search_query))
return len(self.all_models), iter(self.all_models.values())
def save_model(self, instance, form, adding=False):
if adding:
model = Model(self.last_id)
self.last_id += 1
form.populate_obj(model)
self.created_models.append(model)
self.all_models[model.id] = model
else:
form.populate_obj(instance)
self.updated_models.append(instance)
return True
def update_model(self, form, model):
return True
def delete_models(self, *pks):
for pk in pks:
self.deleted_models.append(self.all_models.get(int(pk)))
return True
def setup():
app = Flask(__name__)
app.config["WTF_CSRF_ENABLED"] = False
app.secret_key = "1"
admin = Admin(app)
return app, admin
def test_mockview():
app, admin = setup()
view = MockModelView(Model)
admin.add_view(view)
eq_(view.model, Model)
eq_(view.name, "Model")
eq_(view.url, "/admin/model")
eq_(view.endpoint, "model")
ok_(view.blueprint is not None)
client = app.test_client()
# Make model view requests
rv = client.get("/admin/model/")
eq_(rv.status_code, 200)
# Test model creation view
rv = client.get("/admin/model/add/")
eq_(rv.status_code, 200)
rv = client.post(
"/admin/model/add/", data=dict(col1="test1", col2="test2", col3="test3")
)
eq_(rv.status_code, 302)
eq_(len(view.created_models), 1)
model = view.created_models.pop()
eq_(model.id, 3)
eq_(model.col1, "test1")
eq_(model.col2, "test2")
eq_(model.col3, "test3")
# Try model edit view
rv = client.get("/admin/model/3/")
eq_(rv.status_code, 200)
ok_("test1" in rv.data.decode())
rv = client.post(
"/admin/model/3/", data=dict(col1="test!", col2="test@", col3="test#")
)
eq_(rv.status_code, 302)
eq_(len(view.updated_models), 1)
model = view.updated_models.pop()
eq_(model.col1, "test!")
eq_(model.col2, "test@")
eq_(model.col3, "test#")
rv = client.get("/admin/modelview/4/")
eq_(rv.status_code, 404)
# Attempt to delete model
rv = client.post("/admin/model/3/delete/", data=dict(confirm_delete=True))
eq_(rv.status_code, 302)
eq_(rv.headers["location"], "http://localhost/admin/model/")
def test_permissions():
app, admin = setup()
view = MockModelView(Model)
admin.add_view(view)
client = app.test_client()
view.can_create = False
rv = client.get("/admin/model/add/")
eq_(rv.status_code, 403)
view.can_edit = False
rv = client.get("/admin/model/1/")
# 200 resp, but readonly fields
eq_(rv.status_code, 200)
eq_(rv.data.decode().count('<div class="readonly-value">'), 3)
view.can_delete = False
rv = client.post("/admin/model/1/delete/")
eq_(rv.status_code, 403)
def test_permissions_and_add_delete_buttons():
app, admin = setup()
view = MockModelView(Model)
admin.add_view(view)
client = app.test_client()
resp = client.get("/admin/model/")
eq_(resp.status_code, 200)
ok_("Add Model" in resp.data.decode())
view.can_create = False
resp = client.get("/admin/model/")
eq_(resp.status_code, 200)
ok_("Add Model" not in resp.data.decode())
view.can_edit = False
view.can_delete = False
resp = client.get("/admin/model/1/")
eq_(resp.status_code, 200)
ok_("Submit" not in resp.data.decode())
ok_("Save and stay on page" not in resp.data.decode())
ok_("Delete" not in resp.data.decode())
view.can_edit = False
view.can_delete = True
resp = client.get("/admin/model/1/")
eq_(resp.status_code, 200)
ok_("Submit" not in resp.data.decode())
ok_("Save and stay on page" not in resp.data.decode())
ok_("Delete" in resp.data.decode())
view.can_edit = True
view.can_delete = False
resp = client.get("/admin/model/1/")
eq_(resp.status_code, 200)
ok_("Submit" in resp.data.decode())
ok_("Save and stay on page" in resp.data.decode())
ok_("Delete" not in resp.data.decode())
def test_templates():
app, admin = setup()
view = MockModelView(Model)
admin.add_view(view)
client = app.test_client()
view.list_template = "mock.html"
view.add_template = "mock.html"
view.edit_template = "mock.html"
rv = client.get("/admin/model/")
eq_(rv.data.decode(), "Success!")
rv = client.get("/admin/model/add/")
eq_(rv.data.decode(), "Success!")
rv = client.get("/admin/model/1/")
eq_(rv.data.decode(), "Success!")
def test_list_display_header():
app, admin = setup()
view = MockModelView(Model, list_display=["test_header"])
admin.add_view(view)
eq_(len(view.list_display), 1)
client = app.test_client()
rv = client.get("/admin/model/")
ok_("Test Header" in rv.data.decode())
def test_search_fields():
app, admin = setup()
view = MockModelView(Model, search_fields=["col1", "col2"])
admin.add_view(view)
eq_(view.search_fields, ["col1", "col2"])
client = app.test_client()
rv = client.get("/admin/model/")
ok_('<div class="search">' in rv.data.decode())
|
py | 1a36b02d8000c3588a3bdf017092ab4f61141352 | formatter = "%r %r %r %r"
print(formatter %(1,2,3,4))
print(formatter %('one','two','three','four'))
print(formatter %(True,False,False,True))
print(formatter %(formatter,formatter,formatter,formatter))
print(formatter %('I had this thing' ,'That you could type up right','But it didnt sing.','So I said GN'))
|
py | 1a36b0c9679a815fd86983839add36b5526ac122 | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
my_dict={'insert_me':"HELLO I AM FROM views.py !!"}
    return render(request, 'AppTwo/help.html', context=my_dict) |
py | 1a36b19d06b09545982e846c392cb4bb38a04220 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple, extendable nlp engine that can extract data based on provided conditions.
"""
__version__ = "0.0.7"
import os
import os.path
import sys
import re
import collections
from .model import *
from .normalizer import *
# -------------
# Stemmer
# -------------
class SuffixStemmer:
"""
Base class for stemming words
Return tuple of stemmed outputs
"""
def __init__(self, language, path=None):
self.language = language
self.stemwords = ()
if path is None:
subpath = os.path.dirname(__file__)
path = subpath+"/data/{}/suffix.txt".format(language)
with open(path) as f:
# read file strip \n sort by length and save as tuple
w = [w.strip() for w in f.readlines()]
w.sort(key=len)
self.stemwords = tuple(w)
def stem(self, word):
stem_list = []
for s in self.stemwords:
if word.endswith(s):
stem_list.append(word[:-len(s)])
return tuple(stem_list)
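    # Illustrative sketch (assumptions: 'en' is a hypothetical language code and its
    # suffix.txt contains entries such as 's' and 'ing'; the real file is not shown here):
    #   stemmer = SuffixStemmer('en')
    #   stemmer.stem('running')   # -> ('runn',) because only the 'ing' suffix matches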
# -------------
# Tokenizer
# -------------
class LanguageTokenizer:
"""Tokenize string of data to array of tokens"""
def __init__(self, charset):
self.charset = charset
self.charset_counter = collections.Counter()
self.char_counter = collections.Counter()
self.tokens = []
def tokenize(self, text):
partial = ""
for i, character in enumerate(text):
# pick
if character.lower() in self.charset:
partial += character
else:
if len(partial) > 0:
self.append(partial, i)
partial = ""
self.append(character, i, False)
if len(partial) > 0:
self.append(partial, i)
return self.tokens
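    # Illustrative example (assuming the charset holds only ASCII letters): tokenize('Hi there')
    # yields tokens 'Hi', ' ' and 'there'; the words are tallied in charset_counter and the
    # space in char_counter, so most_common() can report either distribution.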
def append(self, data, index, charset=True):
if charset:
self.charset_counter[data.lower()] += 1
else:
self.char_counter[data.lower()] += 1
self.tokens.append(Token(token=data, end=index-1))
def most_common(self, n, charset=True):
if charset:
return self.charset_counter.most_common(n)
return self.char_counter.most_common(n)
# -------------
# Lexer / Visitor
# -------------
class Visitor:
"""
    Utility class for Lexer that uses the Condition class to check whether
    a Lemma should be added to the Lexer output or the token list processed further
"""
def __init__(self, conditions, empty=True, auto_space=False, num_words=10):
self.conditions = conditions
# empty lexer token list
self.empty = empty
self.auto_space = auto_space
self.lemma = None
self.prev = None
self.num_words = num_words
def __contains__(self, item):
# get items of size num_words
token_list, next_token = item
item_copy = token_list[-self.num_words:]
while len(item_copy) > 0:
# make sentence from list of item
if self.auto_space:
data = Lemma.filter_space(item_copy)
sentence = Lemma.build_sentence(data, separator=" ")
else:
sentence = Lemma.build_sentence(item_copy)
# check sentence against conditions
for condition in self.conditions:
if (sentence, next_token) in condition:
self.lemma = Lemma(type=condition.lemma_type,
data=item_copy[:],
condition=condition.found,
sentence=sentence,
prev=self.prev)
if self.prev is not None:
self.prev.next = self.lemma
self.prev = self.lemma
return True
item_copy.pop(0)
return False
class Lexer:
"""
    Converts a list of tokens into lemmas based on the conditions in the supplied Visitor
"""
def __init__(self, tokens, visitor):
self.tokens = tokens
self.visitor = visitor
def lex(self, progress=False):
lemma_list = []
token_list = []
last_index = len(self.tokens) - 1
for i, token in enumerate(self.tokens):
token_list.append(token)
if i != last_index:
next_token = self.tokens[i+1]
else:
next_token = None
if (token_list, next_token) in self.visitor:
lemma_list.append(self.visitor.lemma)
if self.visitor.empty:
token_list = []
if progress:
sys.stdout.write("\r{}%".format(int(i/len(self.tokens)*100)))
return lemma_list
# -------------
# Prosecco
# -------------
class Prosecco:
"""Let's drink"""
def __init__(self, charset=Charset.EN, conditions=None, num_words=10):
conditions = conditions or [Condition(compare=r".*")]
# custom
self.lemmas = None
self.tokenizer = LanguageTokenizer(charset)
self.visitor = Visitor(conditions=conditions, num_words=num_words)
def drink(self, text, progress=False):
self.tokenizer.tokenize(text)
self.lexer = Lexer(tokens=self.tokenizer.tokens, visitor=self.visitor)
self.lemmas = self.lexer.lex(progress=progress)
return self.lemmas[:]
def get_lemmas(self, type):
return [l for l in self.lemmas if re.match(type, l.type)]
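# Minimal usage sketch (illustrative only; the default Condition(compare=r".*") is assumed
# to match any sentence, and "Hello world" is just sample text):
#   p = Prosecco()
#   lemmas = p.drink("Hello world")
#   all_lemmas = p.get_lemmas(type=".*")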
|
py | 1a36b2cde1c082a0a6c890705e9a8e715a617beb | import base64
import hashlib
from django.utils.encoding import force_text
class HashAlgorithm:
DEFAULT_BLOCK_SIZE = 65535
_registry = {}
hash_factory = None
@classmethod
def get(cls, name):
return cls._registry[name]
@classmethod
def register(cls, algorithm_class):
cls._registry[algorithm_class.name] = algorithm_class
def __init__(self, file_object, block_size=None):
self.block_size = block_size or self.DEFAULT_BLOCK_SIZE
self.file_object = file_object
self.hash_object = self.hash_factory()
def calculate(self):
while (True):
data = self.file_object.read(self.block_size)
if not data:
break
self.hash_object.update(data)
def get_digest(self):
return force_text(self._get_digest())
class SHA1Algorithm(HashAlgorithm):
hash_factory = hashlib.sha1
name = 'sha1'
def _get_digest(self):
return self.hash_object.hexdigest()
class SHA256Algorithm(HashAlgorithm):
hash_factory = hashlib.sha256
name = 'sha256'
def _get_digest(self):
return base64.b64encode(
self.hash_object.digest()
)
class SHA512Algorithm(SHA256Algorithm):
hash_factory = hashlib.sha512
name = 'sha512'
HashAlgorithm.register(algorithm_class=SHA1Algorithm)
HashAlgorithm.register(algorithm_class=SHA256Algorithm)
HashAlgorithm.register(algorithm_class=SHA512Algorithm)
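# Usage sketch (illustrative only; 'document.bin' is a hypothetical file path):
#   algorithm = HashAlgorithm.get('sha256')(file_object=open('document.bin', 'rb'))
#   algorithm.calculate()
#   digest = algorithm.get_digest()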
|
py | 1a36b3ac3210a609d587e4acb34cc6c6d5a3b6b9 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation for CIFAR-10/100.
Accuracy:
cifar_train.py achieves 83.0% accuracy after 100K steps (256 epochs
of data) as judged by cifar_eval.py.
Speed:
On a single Tesla K40, cifar_train.py processes a single batch of 128 images
in 0.25-0.35 sec (i.e. 350 - 600 images /sec). The model reaches ~86%
accuracy after 100K steps in 8 hours of training time.
Usage:
Please see the tutorial and website for how to download the CIFAR-10/100
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import numpy as np
import tensorflow as tf
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_name = dir_path + "/inception"
print(dir_name)
sys.path.append(dir_name)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_dir', '/tmp/cifar_eval',
"""Directory where to write event logs.""")
tf.app.flags.DEFINE_string('eval_data', 'test',
"""Either 'test' or 'train_eval'.""")
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/cifar_train',
"""Directory where to read model checkpoints.""")
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
"""How often to run the eval.""")
tf.app.flags.DEFINE_integer('num_examples', 10000,
"""Number of examples to run.""")
tf.app.flags.DEFINE_boolean('run_once', False,
"""Whether to run eval only once.""")
import cifar_common
import cifar_resnet_tf as cifar
def eval_once(saver, summary_writer, top_k_op, summary_op):
"""Run Eval once.
Args:
saver: Saver.
summary_writer: Summary writer.
top_k_op: Top K op.
summary_op: Summary op.
"""
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/cifar_train/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
print('Loaded checkpoint: ', ckpt.model_checkpoint_path)
else:
print('No checkpoint file found')
return
# Start the queue runners.
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
start=True))
num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
true_count = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * FLAGS.batch_size
step = 0
while step < num_iter and not coord.should_stop():
predictions = sess.run([top_k_op])
true_count += np.sum(predictions)
step += 1
# Compute precision @ 1.
precision = true_count / total_sample_count
print('Precision for ', FLAGS.eval_data)
print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
summary = tf.Summary()
summary.ParseFromString(sess.run(summary_op))
summary.value.add(tag='Precision @ 1', simple_value=precision)
summary_writer.add_summary(summary, global_step)
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def evaluate():
"""Eval CIFAR-10/100 for a number of steps."""
with tf.Graph().as_default() as g:
# Get images and labels for CIFAR-10.
eval_data = FLAGS.eval_data == 'test'
images, labels = cifar_common.inputs(eval_data=eval_data)
# Build a Graph that computes the logits predictions from the
# inference model.
num_classes = 10
if FLAGS.dataset == 'cifar100':
num_classes = 100
logits = cifar.inference(images, num_classes=num_classes, for_training=False)
# Calculate predictions.
top_k_op = tf.nn.in_top_k(logits, labels, 1)
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
cifar.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)
while True:
eval_once(saver, summary_writer, top_k_op, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
def main(argv=None): # pylint: disable=unused-argument
cifar_common.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.eval_dir):
tf.gfile.DeleteRecursively(FLAGS.eval_dir)
tf.gfile.MakeDirs(FLAGS.eval_dir)
evaluate()
if __name__ == '__main__':
tf.app.run()
|
py | 1a36b5aabd09596efcb1f5804ee92d09d5546fa4 | ######################################################################################################################
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Amazon Software License (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://aws.amazon.com/asl/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import boto3
from botocore.client import Config
import paramiko
from xml.dom import minidom
import ast
import time
import os
import string
import logging
log_level = str(os.environ.get('LOG_LEVEL')).upper()
if log_level not in ['DEBUG', 'INFO','WARNING', 'ERROR','CRITICAL']:
log_level = 'ERROR'
log = logging.getLogger()
log.setLevel(log_level)
config_file=str(os.environ.get('CONFIG_FILE'))
#These S3 endpoint URLs are provided to support VPC endpoints for S3 in regions such as Frankfurt that require explicit region endpoint definition
endpoint_url = {
"us-east-1" : "https://s3.amazonaws.com",
"us-east-2" : "https://s3-us-east-2.amazonaws.com",
"us-west-1" : "https://s3-us-west-1.amazonaws.com",
"us-west-2" : "https://s3-us-west-2.amazonaws.com",
"eu-west-1" : "https://s3-eu-west-1.amazonaws.com",
"eu-west-2" : "https://s3-eu-west-2.amazonaws.com",
"eu-central-1" : "https://s3-eu-central-1.amazonaws.com",
"ca-central-1" : "https://s3-ca-central-1.amazonaws.com",
"ap-northeast-1" : "https://s3-ap-northeast-1.amazonaws.com",
"ap-northeast-2" : "https://s3-ap-northeast-2.amazonaws.com",
"ap-south-1" : "https://s3-ap-south-1.amazonaws.com",
"ap-southeast-1" : "https://s3-ap-southeast-1.amazonaws.com",
"ap-southeast-2" : "https://s3-ap-southeast-2.amazonaws.com",
"sa-east-1" : "https://s3-sa-east-1.amazonaws.com"
}
#Logic to determine when the prompt has been discovered
def prompt(chan):
buff = ''
while not buff.endswith('#'):
resp = chan.recv(9999)
buff += resp
#log.debug("%s",resp)
return buff
# Logic to figure out the next available tunnel
def getNextTunnelId(ssh):
log.debug('Start getNextTunnelId')
ssh.send('term len 0\n')
log.debug("%s",prompt(ssh))
ssh.send('config t\n')
log.debug("%s",prompt(ssh))
ssh.send('do show int summary | include Tunnel\n')
output = prompt(ssh)
log.debug("%s",output)
ssh.send('exit\n')
log.debug("%s",prompt(ssh))
lastTunnelNum=''
for line in output.split('\n'):
line=line.replace('* Tunnel','Tunnel')
log.debug("%s",line)
if line.strip()[:6] == 'Tunnel':
lastTunnelNum = line.strip().partition(' ')[0].replace('Tunnel','')
if lastTunnelNum == '':
return 1
return int(lastTunnelNum) + 1
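# Illustrative example (assumed router output): a summary line such as
# '* Tunnel7  10.1.1.1 ...' is normalized to start with 'Tunnel7', so lastTunnelNum
# becomes '7' and the function returns 8; if no Tunnel interfaces exist it returns 1.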
# Logic to figure out existing tunnel IDs
def getExistingTunnelId(ssh,vpn_connection_id):
log.debug('Start getExistingTunnelId')
ssh.send('term len 0\n')
log.debug("%s",prompt(ssh))
#ssh.send('config t\n')
#log.debug("%s",prompt(ssh))
#Display keyrings so we can derive tunnelId
ssh.send('show run | include crypto keyring\n')
output = prompt(ssh)
log.debug("%s",output)
tunnelNum=0
#Now parse crypto keyring lines for keyring-vpn-connection_id-tunnelId
for line in output.split('\n'):
if vpn_connection_id in line:
tmpNum = int(line.split('-')[-1])
if tunnelNum < tmpNum:
tunnelNum = tmpNum
if tunnelNum == 0:
log.error('Unable to find existing tunnels for %s', vpn_connection_id)
return 0
#Parsing logic gets the greater of the two tunnel numbers, so return tunnelNum -1 to get the first tunnel number
return tunnelNum-1
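# Illustrative example (hypothetical config lines): with keyrings named
# 'keyring-vpn-12345678-3' and 'keyring-vpn-12345678-4' present for this
# vpn_connection_id, tunnelNum ends up as 4 and the function returns 3, i.e. the
# first of the two tunnel numbers.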
#Generic logic to push pre-generated Cisco config to the router
def pushConfig(ssh,config):
#log.info("Starting to push config")
#ssh.send('term len 0\n')
#prompt(ssh)
ssh.send('config t\n')
log.debug("%s",prompt(ssh))
stime = time.time()
for line in config:
if line == "WAIT":
log.debug("Waiting 30 seconds...")
time.sleep(30)
else:
ssh.send(line+'\n')
log.debug("%s",prompt(ssh))
ssh.send('exit\n')
log.debug("%s",prompt(ssh))
log.debug(" --- %s seconds ---", (time.time() - stime))
log.info("Saving config!")
ssh.send('copy run start\n\n\n\n\n')
log.info("%s",prompt(ssh))
log.info("Update complete!")
#Logic to determine the bucket prefix from the S3 key name that was provided
def getBucketPrefix(bucket_name, bucket_key):
#Figure out prefix from known bucket_name and bucket_key
bucket_prefix = '/'.join(bucket_key.split('/')[:-2])
if len(bucket_prefix) > 0:
bucket_prefix += '/'
return bucket_prefix
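# Illustrative example (hypothetical key): for bucket_key
# 'vpnconfigs/CSR1/vpn-12345678.conf', split('/')[:-2] leaves ['vpnconfigs'], so the
# returned prefix is 'vpnconfigs/'; a key with no extra parent folders yields ''.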
#Logic to download the transit VPC configuration file from S3
def getTransitConfig(bucket_name, bucket_prefix, s3_url, config_file):
s3=boto3.client('s3', endpoint_url=s3_url,
config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
log.info("Downloading config file: %s/%s/%s%s", s3_url, bucket_name, bucket_prefix,config_file)
return ast.literal_eval(s3.get_object(Bucket=bucket_name,Key=bucket_prefix+config_file)['Body'].read())
#Logic to upload a new/updated transit VPC configuration file to S3 (not currently used)
def putTransitConfig(bucket_name, bucket_prefix, s3_url, config_file, config):
s3=boto3.client('s3', endpoint_url=s3_url,
config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
log.info("Uploading new config file: %s/%s/%s%s", s3_url,bucket_name, bucket_prefix,config_file)
s3.put_object(Bucket=bucket_name,Key=bucket_prefix+config_file,Body=str(config))
#Logic to download the SSH private key from S3 to be used for SSH public key authentication
def downloadPrivateKey(bucket_name, bucket_prefix, s3_url, prikey):
if os.path.exists('/tmp/'+prikey):
os.remove('/tmp/'+prikey)
s3=boto3.client('s3', endpoint_url=s3_url,
config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
log.info("Downloading private key: %s/%s/%s%s",s3_url, bucket_name, bucket_prefix, prikey)
s3.download_file(bucket_name,bucket_prefix+prikey, '/tmp/'+prikey)
#Logic to create the appropriate Cisco configuration
def create_cisco_config(bucket_name, bucket_key, s3_url, bgp_asn, ssh):
log.info("Processing %s/%s", bucket_name, bucket_key)
#Download the VPN configuration XML document
s3=boto3.client('s3',endpoint_url=s3_url,
config=Config(s3={'addressing_style': 'virtual'}, signature_version='s3v4'))
config=s3.get_object(Bucket=bucket_name,Key=bucket_key)
xmldoc=minidom.parseString(config['Body'].read())
#Extract transit_vpc_configuration values
vpn_config = xmldoc.getElementsByTagName("transit_vpc_config")[0]
account_id = vpn_config.getElementsByTagName("account_id")[0].firstChild.data
vpn_endpoint = vpn_config.getElementsByTagName("vpn_endpoint")[0].firstChild.data
vpn_status = vpn_config.getElementsByTagName("status")[0].firstChild.data
preferred_path = vpn_config.getElementsByTagName("preferred_path")[0].firstChild.data
#Extract VPN connection information
vpn_connection=xmldoc.getElementsByTagName('vpn_connection')[0]
vpn_connection_id=vpn_connection.attributes['id'].value
customer_gateway_id=vpn_connection.getElementsByTagName("customer_gateway_id")[0].firstChild.data
vpn_gateway_id=vpn_connection.getElementsByTagName("vpn_gateway_id")[0].firstChild.data
vpn_connection_type=vpn_connection.getElementsByTagName("vpn_connection_type")[0].firstChild.data
#Determine the VPN tunnels to work with
if vpn_status == 'create':
tunnelId=getNextTunnelId(ssh)
else:
tunnelId=getExistingTunnelId(ssh,vpn_connection_id)
if tunnelId == 0:
return
log.info("%s %s with tunnel #%s and #%s.",vpn_status, vpn_connection_id, tunnelId, tunnelId+1)
# Create or delete the VRF for this connection
if vpn_status == 'delete':
ipsec_tunnel = vpn_connection.getElementsByTagName("ipsec_tunnel")[0]
customer_gateway=ipsec_tunnel.getElementsByTagName("customer_gateway")[0]
customer_gateway_bgp_asn=customer_gateway.getElementsByTagName("bgp")[0].getElementsByTagName("asn")[0].firstChild.data
#Remove VPN configuration for both tunnels
config_text = ['router bgp {}'.format(customer_gateway_bgp_asn)]
config_text.append(' no address-family ipv4 vrf {}'.format(vpn_connection_id))
config_text.append('exit')
config_text.append('no ip vrf {}'.format(vpn_connection_id))
config_text.append('interface Tunnel{}'.format(tunnelId))
config_text.append(' shutdown')
config_text.append('exit')
config_text.append('no interface Tunnel{}'.format(tunnelId))
config_text.append('interface Tunnel{}'.format(tunnelId+1))
config_text.append(' shutdown')
config_text.append('exit')
config_text.append('no interface Tunnel{}'.format(tunnelId+1))
config_text.append('no route-map rm-{} permit'.format(vpn_connection_id))
# Cisco requires waiting 60 seconds before removing the isakmp profile
config_text.append('WAIT')
config_text.append('WAIT')
config_text.append('no crypto isakmp profile isakmp-{}-{}'.format(vpn_connection_id,tunnelId))
config_text.append('no crypto isakmp profile isakmp-{}-{}'.format(vpn_connection_id,tunnelId+1))
config_text.append('no crypto keyring keyring-{}-{}'.format(vpn_connection_id,tunnelId))
config_text.append('no crypto keyring keyring-{}-{}'.format(vpn_connection_id,tunnelId+1))
else:
# Create global tunnel configuration
config_text = ['ip vrf {}'.format(vpn_connection_id)]
config_text.append(' rd {}:{}'.format(bgp_asn, tunnelId))
config_text.append(' route-target export {}:0'.format(bgp_asn))
config_text.append(' route-target import {}:0'.format(bgp_asn))
config_text.append('exit')
# Check to see if a route map is needed for creating a preferred path
if preferred_path != 'none':
config_text.append('route-map rm-{} permit'.format(vpn_connection_id))
# If the preferred path is this transit VPC vpn endpoint, then set a shorter as-path prepend than if it is not
if preferred_path == vpn_endpoint:
config_text.append(' set as-path prepend {}'.format(bgp_asn))
else:
config_text.append(' set as-path prepend {} {}'.format(bgp_asn, bgp_asn))
config_text.append('exit')
# Create tunnel specific configuration
for ipsec_tunnel in vpn_connection.getElementsByTagName("ipsec_tunnel"):
customer_gateway=ipsec_tunnel.getElementsByTagName("customer_gateway")[0]
customer_gateway_tunnel_outside_address=customer_gateway.getElementsByTagName("tunnel_outside_address")[0].getElementsByTagName("ip_address")[0].firstChild.data
customer_gateway_tunnel_inside_address_ip_address=customer_gateway.getElementsByTagName("tunnel_inside_address")[0].getElementsByTagName("ip_address")[0].firstChild.data
customer_gateway_tunnel_inside_address_network_mask=customer_gateway.getElementsByTagName("tunnel_inside_address")[0].getElementsByTagName("network_mask")[0].firstChild.data
customer_gateway_tunnel_inside_address_network_cidr=customer_gateway.getElementsByTagName("tunnel_inside_address")[0].getElementsByTagName("network_cidr")[0].firstChild.data
customer_gateway_bgp_asn=customer_gateway.getElementsByTagName("bgp")[0].getElementsByTagName("asn")[0].firstChild.data
customer_gateway_bgp_hold_time=customer_gateway.getElementsByTagName("bgp")[0].getElementsByTagName("hold_time")[0].firstChild.data
vpn_gateway=ipsec_tunnel.getElementsByTagName("vpn_gateway")[0]
vpn_gateway_tunnel_outside_address=vpn_gateway.getElementsByTagName("tunnel_outside_address")[0].getElementsByTagName("ip_address")[0].firstChild.data
vpn_gateway_tunnel_inside_address_ip_address=vpn_gateway.getElementsByTagName("tunnel_inside_address")[0].getElementsByTagName("ip_address")[0].firstChild.data
vpn_gateway_tunnel_inside_address_network_mask=vpn_gateway.getElementsByTagName("tunnel_inside_address")[0].getElementsByTagName("network_mask")[0].firstChild.data
vpn_gateway_tunnel_inside_address_network_cidr=vpn_gateway.getElementsByTagName("tunnel_inside_address")[0].getElementsByTagName("network_cidr")[0].firstChild.data
vpn_gateway_bgp_asn=vpn_gateway.getElementsByTagName("bgp")[0].getElementsByTagName("asn")[0].firstChild.data
vpn_gateway_bgp_hold_time=vpn_gateway.getElementsByTagName("bgp")[0].getElementsByTagName("hold_time")[0].firstChild.data
ike=ipsec_tunnel.getElementsByTagName("ike")[0]
ike_authentication_protocol=ike.getElementsByTagName("authentication_protocol")[0].firstChild.data
ike_encryption_protocol=ike.getElementsByTagName("encryption_protocol")[0].firstChild.data
ike_lifetime=ike.getElementsByTagName("lifetime")[0].firstChild.data
ike_perfect_forward_secrecy=ike.getElementsByTagName("perfect_forward_secrecy")[0].firstChild.data
ike_mode=ike.getElementsByTagName("mode")[0].firstChild.data
ike_pre_shared_key=ike.getElementsByTagName("pre_shared_key")[0].firstChild.data
ipsec=ipsec_tunnel.getElementsByTagName("ipsec")[0]
ipsec_protocol=ipsec.getElementsByTagName("protocol")[0].firstChild.data
ipsec_authentication_protocol=ipsec.getElementsByTagName("authentication_protocol")[0].firstChild.data
ipsec_encryption_protocol=ipsec.getElementsByTagName("encryption_protocol")[0].firstChild.data
ipsec_lifetime=ipsec.getElementsByTagName("lifetime")[0].firstChild.data
ipsec_perfect_forward_secrecy=ipsec.getElementsByTagName("perfect_forward_secrecy")[0].firstChild.data
ipsec_mode=ipsec.getElementsByTagName("mode")[0].firstChild.data
ipsec_clear_df_bit=ipsec.getElementsByTagName("clear_df_bit")[0].firstChild.data
ipsec_fragmentation_before_encryption=ipsec.getElementsByTagName("fragmentation_before_encryption")[0].firstChild.data
ipsec_tcp_mss_adjustment=ipsec.getElementsByTagName("tcp_mss_adjustment")[0].firstChild.data
ipsec_dead_peer_detection_interval=ipsec.getElementsByTagName("dead_peer_detection")[0].getElementsByTagName("interval")[0].firstChild.data
ipsec_dead_peer_detection_retries=ipsec.getElementsByTagName("dead_peer_detection")[0].getElementsByTagName("retries")[0].firstChild.data
config_text.append('crypto keyring keyring-{}-{}'.format(vpn_connection_id,tunnelId))
config_text.append(' local-address GigabitEthernet1')
config_text.append(' pre-shared-key address {} key {}'.format(vpn_gateway_tunnel_outside_address, ike_pre_shared_key))
config_text.append('exit')
config_text.append('crypto isakmp profile isakmp-{}-{}'.format(vpn_connection_id,tunnelId))
config_text.append(' local-address GigabitEthernet1')
config_text.append(' match identity address {}'.format(vpn_gateway_tunnel_outside_address))
config_text.append(' keyring keyring-{}-{}'.format(vpn_connection_id,tunnelId))
config_text.append('exit')
config_text.append('interface Tunnel{}'.format(tunnelId))
config_text.append(' description {} from {} to {} for account {}'.format(vpn_connection_id, vpn_gateway_id, customer_gateway_id, account_id))
config_text.append(' ip vrf forwarding {}'.format(vpn_connection_id))
config_text.append(' ip address {} 255.255.255.252'.format(customer_gateway_tunnel_inside_address_ip_address))
config_text.append(' ip virtual-reassembly')
config_text.append(' tunnel source GigabitEthernet1')
config_text.append(' tunnel destination {} '.format(vpn_gateway_tunnel_outside_address))
config_text.append(' tunnel mode ipsec ipv4')
config_text.append(' tunnel protection ipsec profile ipsec-vpn-aws')
config_text.append(' ip tcp adjust-mss 1387')
config_text.append(' no shutdown')
config_text.append('exit')
config_text.append('router bgp {}'.format(customer_gateway_bgp_asn))
config_text.append(' address-family ipv4 vrf {}'.format(vpn_connection_id))
config_text.append(' neighbor {} remote-as {}'.format(vpn_gateway_tunnel_inside_address_ip_address, vpn_gateway_bgp_asn))
if preferred_path != 'none':
config_text.append(' neighbor {} route-map rm-{} out'.format(vpn_gateway_tunnel_inside_address_ip_address, vpn_connection_id))
config_text.append(' neighbor {} timers 10 30 30'.format(vpn_gateway_tunnel_inside_address_ip_address))
config_text.append(' neighbor {} activate'.format(vpn_gateway_tunnel_inside_address_ip_address))
config_text.append(' neighbor {} as-override'.format(vpn_gateway_tunnel_inside_address_ip_address))
config_text.append(' neighbor {} soft-reconfiguration inbound'.format(vpn_gateway_tunnel_inside_address_ip_address))
config_text.append(' neighbor {} next-hop-self'.format(vpn_gateway_tunnel_inside_address_ip_address))
config_text.append('exit')
config_text.append('exit')
#Increment tunnel ID for going onto the next tunnel
tunnelId+=1
log.debug("Conversion complete")
return config_text
def lambda_handler(event, context):
record=event['Records'][0]
bucket_name=record['s3']['bucket']['name']
bucket_key=record['s3']['object']['key']
bucket_region=record['awsRegion']
bucket_prefix=getBucketPrefix(bucket_name, bucket_key)
log.debug("Getting config")
stime = time.time()
config = getTransitConfig(bucket_name, bucket_prefix, endpoint_url[bucket_region], config_file)
if 'CSR1' in bucket_key:
csr_ip=config['PIP1']
csr_name='CSR1'
else:
csr_ip=config['PIP2']
csr_name='CSR2'
log.info("--- %s seconds ---", (time.time() - stime))
#Download private key file from secure S3 bucket
downloadPrivateKey(bucket_name, bucket_prefix, endpoint_url[bucket_region], config['PRIVATE_KEY'])
log.debug("Reading downloaded private key into memory.")
k = paramiko.RSAKey.from_private_key_file("/tmp/"+config['PRIVATE_KEY'])
#Delete the temp copy of the private key
os.remove("/tmp/"+config['PRIVATE_KEY'])
log.debug("Deleted downloaded private key.")
c = paramiko.SSHClient()
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
log.info("Connecting to %s (%s)", csr_name, csr_ip)
stime = time.time()
try:
c.connect( hostname = csr_ip, username = config['USER_NAME'], pkey = k )
PubKeyAuth=True
except paramiko.ssh_exception.AuthenticationException:
log.error("PubKey Authentication Failed! Connecting with password")
c.connect( hostname = csr_ip, username = config['USER_NAME'], password = config['PASSWORD'] )
PubKeyAuth=False
log.info("--- %s seconds ---", (time.time() - stime))
log.debug("Connected to %s",csr_ip)
ssh = c.invoke_shell()
log.debug("%s",prompt(ssh))
log.debug("Creating config.")
stime = time.time()
csr_config = create_cisco_config(bucket_name, bucket_key, endpoint_url[bucket_region], config['BGP_ASN'], ssh)
log.info("--- %s seconds ---", (time.time() - stime))
log.info("Pushing config to router.")
stime = time.time()
pushConfig(ssh,csr_config)
log.info("--- %s seconds ---", (time.time() - stime))
ssh.close()
    return {
        'message': "Script execution completed. See Cloudwatch logs for complete output"
    }
|
py | 1a36b80dbbe3ff656674863c65ceb43e7d65e0dc | """
Sets the config parameters for the flask app object.
These are accessible in a dictionary, with each line defining a key.
"""
import os
from tempfile import TemporaryDirectory
import torch
_TEMP_FOLDER_OBJECT = TemporaryDirectory()
DEFAULT_USER_ID = 1
ROOT_FOLDER = os.path.dirname(os.path.realpath(__file__))
DATA_FOLDER = os.path.join(ROOT_FOLDER, 'app/web_data')
CHECKPOINT_FOLDER = os.path.join(ROOT_FOLDER, 'app/web_checkpoints')
TEMP_FOLDER = os.path.join(ROOT_FOLDER, _TEMP_FOLDER_OBJECT.name)
SMILES_FILENAME = 'smiles.csv'
PREDICTIONS_FILENAME = 'predictions.csv'
DB_FILENAME = 'chemprop.sqlite3'
CUDA = torch.cuda.is_available()
GPUS = list(range(torch.cuda.device_count()))
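# Usage sketch (assumption: this module is importable as `config`; the Flask calls below
# are illustrative and not part of this file):
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object('config')   # upper-case names above become app.config keys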
|
py | 1a36b8f3ed3f0b99f3a010709e3882d715771136 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from exam import fixture
from sentry.models import Organization
from sentry.testutils import APITestCase
class OrganizationsListTest(APITestCase):
@fixture
def path(self):
return reverse('sentry-api-0-organizations')
def test_simple(self):
org = self.create_organization(owner=self.user)
self.login_as(user=self.user)
response = self.client.get(self.path)
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(org.id)
class OrganizationsCreateTest(APITestCase):
@fixture
def path(self):
return reverse('sentry-api-0-organizations')
def test_missing_params(self):
self.login_as(user=self.user)
resp = self.client.post(self.path)
assert resp.status_code == 400
def test_valid_params(self):
self.login_as(user=self.user)
resp = self.client.post(self.path, data={
'name': 'hello world',
'slug': 'foobar',
})
assert resp.status_code == 201, resp.content
org = Organization.objects.get(id=resp.data['id'])
assert org.name == 'hello world'
assert org.slug == 'foobar'
def test_without_slug(self):
self.login_as(user=self.user)
resp = self.client.post(self.path, data={
'name': 'hello world',
})
assert resp.status_code == 201, resp.content
org = Organization.objects.get(id=resp.data['id'])
assert org.slug == 'hello-world'
|
py | 1a36b90fb2d9334100f3c7056606e28c0bfce0c5 | TURBO_SETI_VERSION = '2.1.20'
|
py | 1a36b963c6a16d79aa5c7eef751a1d1ab71bf1dd | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.awt
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.awt import XMenu as XMenu
setattr(XMenu, '__ooo_ns__', 'com.sun.star.awt')
setattr(XMenu, '__ooo_full_ns__', 'com.sun.star.awt.XMenu')
setattr(XMenu, '__ooo_type_name__', 'interface')
else:
from ...lo.awt.x_menu import XMenu as XMenu
__all__ = ['XMenu']
|
py | 1a36ba9911989204622480fe89bb322fa00bb286 | from flask import Flask, render_template, g
from flask_socketio import SocketIO, emit
from flask_cors import CORS
from models import Database
import settings
app = Flask(__name__, template_folder="./templates",
static_folder="./assets/", static_url_path="")
with app.app_context():
DB = Database()
app.config['SECRET_KEY'] = getattr(settings, 'SECRET_KEY', 'mySecretKey')
cors = CORS(app, resources={r"/*": {"origins": "*"}})
socketio = SocketIO(app, logger=True, engineio_logger=True)
@socketio.on('connect')
def on_connect():
res = DB.query_db("""
SELECT *
FROM comments
ORDER BY id DESC
LIMIT 20;
""")
emit("load_comments", res, broadcast=True)
@socketio.on('add comment event')
def on_add_comment(data=None):
comment_id = DB.insert_query("""INSERT INTO comments(
parent_id,
content,
nbr_vote,
added,
author_lastname,
author_firstname)
VALUES(?,?,?,?,?,?)
""", (
data.get('parent_id', None),
data['content'],
data['nbr_vote'],
data['date_added'],
'DOE',
'John',
))
data['id'] = comment_id
print(">>>> Add", data)
emit('add_handler_comment', data, broadcast=True)
@socketio.on('delete comment event')
def on_delete_comment(data=None):
print(">>>> Delete", data)
result = DB.delete_query(
"DELETE FROM comments WHERE id = ?",
(data['comment_id'],)
)
data['message'] = result
emit('delete_handler_comment', data, broadcast=True)
@socketio.on('vote comment event')
def on_vote_comment(data=None):
print("DEBUG UPDATE", "data", data)
DB.update_query(
"UPDATE comments SET nbr_vote = ? WHERE id = ?", (
data['nbr_vote'],
data['comment_id'],
)
)
print(">>>> Vote", data)
emit('vote_handler_comment', data, broadcast=True)
@app.route("/")
def indexRoute():
# db = Database()
return render_template('index.html')
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
if __name__ == '__main__':
socketio.run(app, port=5000, host='127.0.0.1')
|
py | 1a36bb577796f0cb4a6a8ce12ca3e82cee1ce472 | # keep line below to expose "main" content in icclim package namespace
from .main import index, indice, indices
__version__ = "5.0.2"
|
py | 1a36bb80013e3f91015783f2ef365acbedc6d09b | """user
Revision ID: ba4c10cd1c2e
Revises:
Create Date: 2021-02-14 17:14:42.063643
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ba4c10cd1c2e'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('username', sa.String(length=50), nullable=False),
sa.Column('password', sa.String(length=32), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table('session',
sa.Column('key', sa.String(length=32), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('expires', sa.DateTime(timezone=True), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('key')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('session')
op.drop_table('user')
# ### end Alembic commands ###
|
py | 1a36bcdc91e030fe888d781101309d795533f3ee | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.apps import AppConfig
class StreamtipConfig(AppConfig):
name = 'streamtip'
verbose_name = 'Streamtip Application'
def ready(self):
import streamtip.signals
|
py | 1a36c0b8749434d07ab2e4704c1ad70b352e44bd | #!/usr/bin/env python3.7
# Copyright: Ismael Narvaez Berenjeno
import asyncio
import json
from typing import Union, List
from nats.aio.client import Client as NatsClient
from async_nats_connection.data_model.async_nats_params import AsyncNatsParams
from async_nats_connection.util.time import get_time_isoformat
class AsyncNatsConnection:
def __init__(self, params: AsyncNatsParams, logger):
self.name = self.__class__.__name__
self._params = params
self.nats_client = None
self.input_messages_queue = asyncio.Queue()
self.subscribed_subjects = {}
self.logger = logger
self.logger.debug(f"Instantiated class {self.name}")
def __repr__(self):
net_connection_info = None
if self.nats_client:
net_connection_info = self.nats_client.connected_url.netloc
if net_connection_info:
return f"<AsyncNatsConnection with {net_connection_info}>"
else:
return "<AsyncNatsConnection not connected>"
async def _disconnected_cb(self):
self.logger.warning("Disconnected from NATS server.")
async def _reconnected_cb(self):
self.logger.info(f"Got reconnected to {self.nats_client.connected_url.netloc}")
async def _error_cb(self, error):
self.logger.error(f"Error with NATS connection: {error}")
async def _closed_cb(self):
self.logger.info("Connection closed with NATS server.")
async def _connect(self):
result = False
if self.nats_client is None:
self.nats_client = NatsClient()
try:
await self.nats_client.connect(
**self._params.dict(),
io_loop=asyncio.get_running_loop(),
disconnected_cb=self._disconnected_cb,
reconnected_cb=self._reconnected_cb,
error_cb=self._error_cb,
closed_cb=self._closed_cb
)
result = self.nats_client.is_connected
except Exception as exception:
self.logger.exception(exception)
self.nats_client = None
return result
async def _disconnect(self):
await self.nats_client.drain()
await self.nats_client.close()
self.nats_client = None
async def _message_handler(self, message):
"""Execute this function when we received a message.
Put in a queue.
"""
output_message = {
"subject": message.subject,
"payload": message.data.decode(),
"timestamp": get_time_isoformat()
}
await self.input_messages_queue.put(output_message)
async def publish(self, subject: str, data: Union[str, dict, list]) -> List[bool]:
"""Publish messages in a subject.
:param subject: Subject where publish
:param data: Data to publish
:return: Operation result
"""
if self.nats_client is None:
await self._connect()
result = [False]
if isinstance(data, str):
try:
await self.nats_client.publish(subject, data.encode())
result = [True]
except Exception as exception:
self.logger.exception(exception)
result = [False]
elif isinstance(data, dict):
data = json.dumps(data)
result = await self.publish(subject, data)
elif isinstance(data, list):
tasks = [self.publish(subject, element) for element in data]
result = await asyncio.gather(
*tasks
)
else:
self.logger.error(f"Data sent must be str, dict or list, not {type(data)}")
result = [False]
return result
async def subscribe(self, subject: str) -> bool:
"""Subscribe to a subject.
:param subject: Subject to subscribe
:return: Operation result
"""
result = False
if self.nats_client is None:
await self._connect()
try:
if subject in self.subscribed_subjects:
raise ValueError(f"Duplicated subject: {subject}. Unsubscribe first.")
subscribe_id = await self.nats_client.subscribe(subject, cb=self._message_handler)
self.subscribed_subjects[subject] = {
"id": subscribe_id,
"timestamp": get_time_isoformat()
}
result = True
except Exception as exception:
self.logger.exception(exception)
result = False
return result
async def unsubscribe(self, subject: str) -> bool:
"""Unsubscribe from a subject.
:param subject: Subject to unsubscribe
:return: Operation result
"""
result = False
if subject in self.subscribed_subjects:
subscribe_id = self.subscribed_subjects[subject]['id']
try:
await self.nats_client.unsubscribe(subscribe_id)
del self.subscribed_subjects[subject]
result = True
except Exception as exception:
self.logger.exception(exception)
result = False
else:
self.logger.error(f"Subject {subject} doesn't exist.")
result = False
return result
def get_subscribed_subjects(self) -> dict:
"""
Return dict with info about subscribed subjects.
:return: Subscribed subjects.
"""
return self.subscribed_subjects
def exist_queued_messages(self) -> bool:
"""Check if exists elements to read.
:return: Operation result.
"""
is_emptied_queue = self.input_messages_queue.empty()
exist_element = not is_emptied_queue
return exist_element
async def get_message(self) -> dict:
"""Return a received message.
:return: Message with subject, payload and received timestamp.
"""
if self.exist_queued_messages():
return await self.input_messages_queue.get()
else:
self.logger.info("Not messages queued.")
async def connect(self):
"""Connect to NATS server."""
await self._connect()
async def disconnect(self):
"""Disconnect from NATS server."""
await self._disconnect()
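# Minimal usage sketch (illustrative; the AsyncNatsParams field names are assumptions
# based on typical NATS client options, not taken from this file):
#   async def demo(logger):
#       conn = AsyncNatsConnection(AsyncNatsParams(servers=["nats://127.0.0.1:4222"]), logger)
#       await conn.connect()
#       await conn.subscribe("updates")
#       await conn.publish("updates", {"status": "ok"})
#       if conn.exist_queued_messages():
#           message = await conn.get_message()
#       await conn.disconnect()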
|
py | 1a36c10c7ad47cf3a5d15cbfeb1413b309b026a8 | i = 0 #define an integer
while (i < 119):
print(i)
i += 10 |
py | 1a36c224fe8d512aecd6eeb30127086030a1d144 | import copy
import json
import furl
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.providers.github import settings
from waterbutler.providers.github.path import GitHubPath
from waterbutler.providers.github.metadata import GitHubRevision
from waterbutler.providers.github.metadata import GitHubFileContentMetadata
from waterbutler.providers.github.metadata import GitHubFolderContentMetadata
from waterbutler.providers.github.metadata import GitHubFileTreeMetadata
from waterbutler.providers.github.metadata import GitHubFolderTreeMetadata
from waterbutler.providers.github.exceptions import GitHubUnsupportedRepoError
GIT_EMPTY_SHA = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'
class GitHubProvider(provider.BaseProvider):
"""Provider for GitHub repositories.
**On paths:** WB and GH use slightly different default conventions for their paths, so we
often have to munge our WB paths before comparison. Here is a quick overview::
WB (dirs): wb_dir.path == 'foo/bar/' str(wb_dir) == '/foo/bar/'
WB (file): wb_file.path = 'foo/bar.txt' str(wb_file) == '/foo/bar.txt'
GH (dir): 'foo/bar'
GH (file): 'foo/bar.txt'
API docs: https://developer.github.com/v3/
Quirks:
* git doesn't have a concept of empty folders, so this provider creates 0-byte ``.gitkeep``
files in the requested folder.
* The ``contents`` endpoint cannot be used to fetch metadata reliably for all files. Requesting
      a file that is larger than 1Mb will result in an error response directing you to the ``blob``
endpoint. A recursive tree fetch may be used instead.
* The tree endpoint truncates results after a large number of files. It does not provide a way
to page through the tree. Since move, copy, and folder delete operations rely on whole-tree
replacement, they cannot be reliably supported for large repos. Attempting to use them will
throw a 501 Not Implemented error.
"""
NAME = 'github'
BASE_URL = settings.BASE_URL
VIEW_URL = settings.VIEW_URL
@staticmethod
def is_sha(ref):
# sha1 is always 40 characters in length
try:
if len(ref) != 40:
return False
# sha1 is always base 16 (hex)
int(ref, 16)
except (TypeError, ValueError, ):
return False
return True
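    # Illustrative check: is_sha('4b825dc642cb6eb9a060e54bf8d69288fbee4904') is True,
    # while is_sha('master') and is_sha('4b82') are False (wrong length or not hex).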
def __init__(self, auth, credentials, settings):
super().__init__(auth, credentials, settings)
self.name = self.auth.get('name', None)
self.email = self.auth.get('email', None)
self.token = self.credentials['token']
self.owner = self.settings['owner']
self.repo = self.settings['repo']
self.metrics.add('repo', {'repo': self.repo, 'owner': self.owner})
async def validate_v1_path(self, path, **kwargs):
if not getattr(self, '_repo', None):
self._repo = await self._fetch_repo()
self.default_branch = self._repo['default_branch']
branch_ref, ref_from = None, None
if kwargs.get('ref'):
branch_ref = kwargs.get('ref')
ref_from = 'query_ref'
elif kwargs.get('branch'):
branch_ref = kwargs.get('branch')
ref_from = 'query_branch'
else:
branch_ref = self.default_branch
ref_from = 'default_branch'
if isinstance(branch_ref, list):
raise exceptions.InvalidParameters('Only one ref or branch may be given.')
self.metrics.add('branch_ref_from', ref_from)
if path == '/':
return GitHubPath(path, _ids=[(branch_ref, '')])
branch_data = await self._fetch_branch(branch_ref)
# throws Not Found if path not in tree
await self._search_tree_for_path(path, branch_data['commit']['commit']['tree']['sha'])
path = GitHubPath(path)
for part in path.parts:
part._id = (branch_ref, None)
# TODO Validate that filesha is a valid sha
path.parts[-1]._id = (branch_ref, kwargs.get('fileSha'))
self.metrics.add('file_sha_given', True if kwargs.get('fileSha') else False)
return path
async def validate_path(self, path, **kwargs):
if not getattr(self, '_repo', None):
self._repo = await self._fetch_repo()
self.default_branch = self._repo['default_branch']
path = GitHubPath(path)
branch_ref, ref_from = None, None
if kwargs.get('ref'):
branch_ref = kwargs.get('ref')
ref_from = 'query_ref'
elif kwargs.get('branch'):
branch_ref = kwargs.get('branch')
ref_from = 'query_branch'
else:
branch_ref = self.default_branch
ref_from = 'default_branch'
if isinstance(branch_ref, list):
raise exceptions.InvalidParameters('Only one ref or branch may be given.')
self.metrics.add('branch_ref_from', ref_from)
for part in path.parts:
part._id = (branch_ref, None)
# TODO Validate that filesha is a valid sha
path.parts[-1]._id = (branch_ref, kwargs.get('fileSha'))
self.metrics.add('file_sha_given', True if kwargs.get('fileSha') else False)
return path
async def revalidate_path(self, base, path, folder=False):
return base.child(path, _id=((base.branch_ref, None)), folder=folder)
def path_from_metadata(self, parent_path, metadata):
"""Build a path from a parent path and a metadata object. Will correctly set the _id
Used for building zip archives."""
file_sha = metadata.extra.get('fileSha', None)
return parent_path.child(metadata.name, _id=(metadata.ref, file_sha), folder=metadata.is_folder, )
def can_duplicate_names(self):
return False
@property
def default_headers(self):
return {'Authorization': 'token {}'.format(self.token)}
@property
def committer(self):
return {
'name': self.name,
'email': self.email,
}
def build_repo_url(self, *segments, **query):
segments = ('repos', self.owner, self.repo) + segments
return self.build_url(*segments, **query)
def can_intra_move(self, other, path=None):
return self.can_intra_copy(other, path=path)
def can_intra_copy(self, other, path=None):
return (
type(self) == type(other) and
self.repo == other.repo and
self.owner == other.owner
)
# do these need async?
async def intra_copy(self, dest_provider, src_path, dest_path):
return (await self._do_intra_move_or_copy(src_path, dest_path, True))
async def intra_move(self, dest_provider, src_path, dest_path):
return (await self._do_intra_move_or_copy(src_path, dest_path, False))
async def download(self, path, revision=None, **kwargs):
        '''Get the stream of the specified file on GitHub
        :param str path: The path to the file on GitHub
        :param str ref: The git 'ref' (a branch or commit sha) from which to get the file
        :param str fileSha: The sha of the file to be downloaded; if specified, path will be ignored
        :param dict kwargs: Ignored
'''
data = await self.metadata(path, revision=revision)
file_sha = path.file_sha or data.extra['fileSha']
resp = await self.make_request(
'GET',
self.build_repo_url('git', 'blobs', file_sha),
headers={'Accept': 'application/vnd.github.v3.raw'},
expects=(200, ),
throws=exceptions.DownloadError,
)
return streams.ResponseStreamReader(resp, size=data.size)
async def upload(self, stream, path, message=None, branch=None, **kwargs):
assert self.name is not None
assert self.email is not None
try:
exists = await self.exists(path)
except exceptions.ProviderError as e:
if e.data.get('message') == 'Git Repository is empty.':
self.metrics.add('upload.initialized_empty_repo', True)
exists = False
resp = await self.make_request(
'PUT',
self.build_repo_url('contents', '.gitkeep'),
data=json.dumps({
'content': '',
'path': '.gitkeep',
'committer': self.committer,
'branch': path.branch_ref,
'message': 'Initial commit'
}),
expects=(201,),
throws=exceptions.CreateFolderError
)
data = await resp.json()
latest_sha = data['commit']['sha']
else:
latest_sha = await self._get_latest_sha(ref=path.branch_ref)
blob = await self._create_blob(stream)
tree = await self._create_tree({
'base_tree': latest_sha,
'tree': [{
'path': path.path,
'mode': '100644',
'type': 'blob',
'sha': blob['sha']
}]
})
if exists and await self._is_blob_in_tree(blob, path): # Avoids empty commits
return GitHubFileTreeMetadata({
'path': path.path,
'sha': blob['sha'],
'size': stream.size,
}, ref=path.branch_ref), not exists
commit = await self._create_commit({
'tree': tree['sha'],
'parents': [latest_sha],
'committer': self.committer,
'message': message or (settings.UPDATE_FILE_MESSAGE if exists else settings.UPLOAD_FILE_MESSAGE),
})
# Doesn't return anything useful
await self._update_ref(commit['sha'], ref=path.branch_ref)
# You're hacky
return GitHubFileTreeMetadata({
'path': path.path,
'sha': blob['sha'],
'size': stream.size,
}, commit=commit, ref=path.branch_ref), not exists
async def delete(self, path, sha=None, message=None, branch=None,
confirm_delete=0, **kwargs):
"""Delete file, folder, or provider root contents
:param GitHubPath path: GitHubPath path object for file, folder, or root
:param str sha: SHA-1 checksum of file/folder object
:param str message: Commit message
:param str branch: Repository branch
:param int confirm_delete: Must be 1 to confirm root folder delete
"""
assert self.name is not None
assert self.email is not None
if path.is_root:
if confirm_delete == 1:
await self._delete_root_folder_contents(path)
else:
raise exceptions.DeleteError(
'confirm_delete=1 is required for deleting root provider folder',
code=400,
)
elif path.is_dir:
await self._delete_folder(path, message, **kwargs)
else:
await self._delete_file(path, message, **kwargs)
async def metadata(self, path, **kwargs):
"""Get Metadata about the requested file or folder
:param str path: The path to a file or folder
:rtype dict: if file, metadata object describing the file
:rtype list: if folder, array of metadata objects describing contents
"""
if path.is_dir:
return (await self._metadata_folder(path, **kwargs))
else:
return (await self._metadata_file(path, **kwargs))
async def revisions(self, path, sha=None, **kwargs):
resp = await self.make_request(
'GET',
self.build_repo_url('commits', path=path.path, sha=sha or path.file_sha),
expects=(200, ),
throws=exceptions.RevisionsError
)
return [
GitHubRevision(item)
for item in (await resp.json())
]
async def create_folder(self, path, branch=None, message=None, **kwargs):
GitHubPath.validate_folder(path)
assert self.name is not None
assert self.email is not None
message = message or settings.UPLOAD_FILE_MESSAGE
keep_path = path.child('.gitkeep')
data = {
'content': '',
'path': keep_path.path,
'committer': self.committer,
'branch': path.branch_ref,
'message': message or settings.UPLOAD_FILE_MESSAGE
}
resp = await self.make_request(
'PUT',
self.build_repo_url('contents', keep_path.path),
data=json.dumps(data),
expects=(201, 422, 409),
throws=exceptions.CreateFolderError
)
data = await resp.json()
if resp.status in (422, 409):
if resp.status == 409 or data.get('message') == 'Invalid request.\n\n"sha" wasn\'t supplied.':
raise exceptions.FolderNamingConflict(str(path))
raise exceptions.CreateFolderError(data, code=resp.status)
data['content']['name'] = path.name
data['content']['path'] = data['content']['path'].replace('.gitkeep', '')
return GitHubFolderContentMetadata(data['content'], commit=data['commit'], ref=path.branch_ref)
async def _delete_file(self, path, message=None, **kwargs):
if path.file_sha:
sha = path.file_sha
else:
sha = (await self.metadata(path)).extra['fileSha']
if not sha:
raise exceptions.MetadataError('A sha is required for deleting')
data = {
'sha': sha,
'branch': path.branch_ref,
'committer': self.committer,
'message': message or settings.DELETE_FILE_MESSAGE,
}
resp = await self.make_request(
'DELETE',
self.build_repo_url('contents', path.path),
headers={'Content-Type': 'application/json'},
data=json.dumps(data),
expects=(200, ),
throws=exceptions.DeleteError,
)
await resp.release()
async def _delete_folder(self, path, message=None, **kwargs):
branch_data = await self._fetch_branch(path.branch_ref)
old_commit_sha = branch_data['commit']['sha']
old_commit_tree_sha = branch_data['commit']['commit']['tree']['sha']
# e.g. 'level1', 'level2', or ''
tree_paths = path.parts[1:]
trees = [{
'target': tree_paths[0].value,
'tree': [
{
'path': item['path'],
'mode': item['mode'],
'type': item['type'],
'sha': item['sha'],
}
for item in (await self._fetch_tree(old_commit_tree_sha))['tree']
]
}]
for idx, tree_path in enumerate(tree_paths[:-1]):
try:
tree_sha = next(x for x in trees[-1]['tree'] if x['path'] == tree_path.value)['sha']
except StopIteration:
raise exceptions.MetadataError(
'Could not delete folder \'{0}\''.format(path),
code=404,
)
trees.append({
'target': tree_paths[idx + 1].value,
'tree': [
{
'path': item['path'],
'mode': item['mode'],
'type': item['type'],
'sha': item['sha'],
}
for item in (await self._fetch_tree(tree_sha))['tree']
]
})
# The last tree's structure is rewritten w/o the target folder, all others
# in the hierarchy are simply updated to reflect this change.
tree = trees.pop()
if tree['target'] == '':
# Git Empty SHA
tree_sha = GIT_EMPTY_SHA
else:
            # Remove the target folder from the tree (filter() returns an iterator, so cast back to a list)
current_tree = tree['tree']
tree['tree'] = list(filter(lambda x: x['path'] != tree['target'], tree['tree']))
if current_tree == tree['tree']:
raise exceptions.NotFoundError(str(path))
tree_data = await self._create_tree({'tree': tree['tree']})
tree_sha = tree_data['sha']
# Update parent tree(s)
for tree in reversed(trees):
for item in tree['tree']:
if item['path'] == tree['target']:
item['sha'] = tree_sha
break
tree_data = await self._create_tree({'tree': tree['tree']})
tree_sha = tree_data['sha']
# Create a new commit which references our top most tree change.
message = message or settings.DELETE_FOLDER_MESSAGE
commit_resp = await self.make_request(
'POST',
self.build_repo_url('git', 'commits'),
headers={'Content-Type': 'application/json'},
data=json.dumps({
'message': message,
'committer': self.committer,
'tree': tree_sha,
'parents': [
old_commit_sha,
],
}),
expects=(201, ),
throws=exceptions.DeleteError,
)
commit_data = await commit_resp.json()
commit_sha = commit_data['sha']
# Update repository reference, point to the newly created commit.
# No need to store data, rely on expects to raise exceptions
resp = await self.make_request(
'PATCH',
self.build_repo_url('git', 'refs', 'heads', path.branch_ref),
headers={'Content-Type': 'application/json'},
data=json.dumps({'sha': commit_sha}),
expects=(200, ),
throws=exceptions.DeleteError,
)
await resp.release()
async def _delete_root_folder_contents(self, path, message=None, **kwargs):
"""Delete the contents of the root folder.
:param GitHubPath path: GitHubPath path object for folder
:param str message: Commit message
"""
branch_data = await self._fetch_branch(path.branch_ref)
old_commit_sha = branch_data['commit']['sha']
tree_sha = GIT_EMPTY_SHA
message = message or settings.DELETE_FOLDER_MESSAGE
commit_resp = await self.make_request(
'POST',
self.build_repo_url('git', 'commits'),
headers={'Content-Type': 'application/json'},
data=json.dumps({
'message': message,
'committer': self.committer,
'tree': tree_sha,
'parents': [
old_commit_sha,
],
}),
expects=(201, ),
throws=exceptions.DeleteError,
)
commit_data = await commit_resp.json()
commit_sha = commit_data['sha']
# Update repository reference, point to the newly created commit.
# No need to store data, rely on expects to raise exceptions
await self.make_request(
'PATCH',
self.build_repo_url('git', 'refs', 'heads', path.branch_ref),
headers={'Content-Type': 'application/json'},
data=json.dumps({'sha': commit_sha}),
expects=(200, ),
throws=exceptions.DeleteError,
)
async def _fetch_branch(self, branch):
resp = await self.make_request(
'GET',
self.build_repo_url('branches', branch)
)
if resp.status == 404:
await resp.release()
raise exceptions.NotFoundError('. No such branch \'{}\''.format(branch))
return (await resp.json())
async def _fetch_contents(self, path, ref=None):
url = furl.furl(self.build_repo_url('contents', path.path))
if ref:
url.args.update({'ref': ref})
resp = await self.make_request(
'GET',
url.url,
expects=(200, ),
throws=exceptions.MetadataError
)
return (await resp.json())
async def _fetch_repo(self):
resp = await self.make_request(
'GET',
self.build_repo_url(),
expects=(200, ),
throws=exceptions.MetadataError
)
return (await resp.json())
async def _fetch_tree(self, sha, recursive=False):
url = furl.furl(self.build_repo_url('git', 'trees', sha))
if recursive:
url.args.update({'recursive': 1})
resp = await self.make_request(
'GET',
url.url,
expects=(200, ),
throws=exceptions.MetadataError
)
tree = await resp.json()
if tree['truncated']:
raise GitHubUnsupportedRepoError
return tree
async def _search_tree_for_path(self, path, tree_sha, recursive=True):
"""Search through the given tree for an entity matching the name and type of `path`.
"""
tree = await self._fetch_tree(tree_sha, recursive=True)
if tree['truncated']:
raise GitHubUnsupportedRepoError
implicit_type = 'tree' if path.endswith('/') else 'blob'
for entity in tree['tree']:
if entity['path'] == path.strip('/') and entity['type'] == implicit_type:
return entity
raise exceptions.NotFoundError(str(path))
async def _create_tree(self, tree):
resp = await self.make_request(
'POST',
self.build_repo_url('git', 'trees'),
headers={'Content-Type': 'application/json'},
data=json.dumps(tree),
expects=(201, ),
throws=exceptions.ProviderError,
)
return (await resp.json())
async def _create_commit(self, commit):
resp = await self.make_request(
'POST',
self.build_repo_url('git', 'commits'),
headers={'Content-Type': 'application/json'},
data=json.dumps(commit),
expects=(201, ),
throws=exceptions.ProviderError,
)
return (await resp.json())
async def _create_blob(self, stream):
blob_stream = streams.JSONStream({
'encoding': 'base64',
'content': streams.Base64EncodeStream(stream),
})
resp = await self.make_request(
'POST',
self.build_repo_url('git', 'blobs'),
data=blob_stream,
headers={
'Content-Type': 'application/json',
'Content-Length': str(blob_stream.size),
},
expects=(201, ),
throws=exceptions.UploadError,
)
return (await resp.json())
def _is_sha(self, ref):
# sha1 is always 40 characters in length
try:
if len(ref) != 40:
return False
# sha1 is always base 16 (hex)
int(ref, 16)
except (TypeError, ValueError, ):
return False
return True
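    # Illustrative check (value below is hypothetical): a 40-character hex string such as
    # 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3' returns True, while 'master' returns False.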
def _web_view(self, path):
segments = (self.owner, self.repo, 'blob', path.branch_ref, path.path)
return provider.build_url(settings.VIEW_URL, *segments)
async def _metadata_folder(self, path, **kwargs):
ref = path.branch_ref
try:
# it's cool to use the contents API here because we know path is a dir and won't hit
# the 1mb size limit
data = await self._fetch_contents(path, ref=ref)
except exceptions.MetadataError as e:
if e.data.get('message') == 'This repository is empty.':
data = []
else:
raise
if isinstance(data, dict):
raise exceptions.MetadataError(
'Could not retrieve folder "{0}"'.format(str(path)),
code=404,
)
ret = []
for item in data:
if item['type'] == 'dir':
ret.append(GitHubFolderContentMetadata(item, ref=ref))
else:
ret.append(GitHubFileContentMetadata(item, ref=ref, web_view=item['html_url']))
return ret
async def _metadata_file(self, path, revision=None, **kwargs):
resp = await self.make_request(
'GET',
self.build_repo_url('commits', path=path.path, sha=revision or path.branch_ref),
expects=(200, ),
throws=exceptions.MetadataError,
)
commits = await resp.json()
if not commits:
raise exceptions.NotFoundError(str(path))
latest = commits[0]
tree = await self._fetch_tree(latest['commit']['tree']['sha'], recursive=True)
try:
data = next(
x for x in tree['tree']
if x['path'] == path.path
)
except StopIteration:
raise exceptions.NotFoundError(str(path))
if isinstance(data, list):
raise exceptions.MetadataError(
'Could not retrieve file "{0}"'.format(str(path)),
code=404,
)
return GitHubFileTreeMetadata(
data, commit=latest['commit'], web_view=self._web_view(path),
ref=path.branch_ref
)
async def _get_latest_sha(self, ref='master'):
resp = await self.make_request(
'GET',
self.build_repo_url('git', 'refs', 'heads', ref),
expects=(200, ),
throws=exceptions.ProviderError
)
data = await resp.json()
return data['object']['sha']
async def _update_ref(self, sha, ref='master'):
resp = await self.make_request(
'POST',
self.build_repo_url('git', 'refs', 'heads', ref),
data=json.dumps({
'sha': sha,
}),
expects=(200, ),
throws=exceptions.ProviderError
)
return (await resp.json())
async def _do_intra_move_or_copy(self, src_path, dest_path, is_copy):
# ON PATHS:
# WB and GH use slightly different default conventions for their paths, so we often
# have to munge our WB paths before comparison. Here is a quick overview:
# WB (dirs): wb_dir.path == 'foo/bar/' str(wb_dir) == '/foo/bar/'
# WB (file): wb_file.path = 'foo/bar.txt' str(wb_file) == '/foo/bar.txt'
# GH (dir): 'foo/bar'
# GH (file): 'foo/bar.txt'
src_tree, src_head = await self._get_tree_and_head(src_path.branch_ref)
# these are the blobs to copy/move
blobs = [
item
for item in src_tree['tree']
if src_path.is_dir and item['path'].startswith(src_path.path) or
src_path.is_file and item['path'] == src_path.path
]
if len(blobs) == 0:
raise exceptions.NotFoundError(str(src_path))
if src_path.is_file:
assert len(blobs) == 1, 'Found multiple targets'
commit_msg = settings.COPY_MESSAGE if is_copy else settings.MOVE_MESSAGE
commit = None
if src_path.branch_ref == dest_path.branch_ref:
exists = self._path_exists_in_tree(src_tree['tree'], dest_path)
# if we're overwriting an existing dir, we must remove its blobs from the tree
if dest_path.is_dir:
src_tree['tree'] = self._remove_path_from_tree(src_tree['tree'], dest_path)
# if this is a copy, duplicate and append our source blobs. The originals will be updated
# with the new destination path.
if is_copy:
src_tree['tree'].extend(copy.deepcopy(blobs))
            # now rewrite the original blobs to point at the destination path (as noted above)
self._reparent_blobs(blobs, src_path, dest_path)
src_tree['tree'] = self._prune_subtrees(src_tree['tree'])
commit = await self._commit_tree_and_advance_branch(src_tree, {'sha': src_head},
commit_msg, src_path.branch_ref)
else:
dest_tree, dest_head = await self._get_tree_and_head(dest_path.branch_ref)
exists = self._path_exists_in_tree(dest_tree['tree'], dest_path)
dest_tree['tree'] = self._remove_path_from_tree(dest_tree['tree'], dest_path)
new_blobs = copy.deepcopy(blobs)
self._reparent_blobs(new_blobs, src_path, dest_path)
dest_tree['tree'].extend(new_blobs)
dest_tree['tree'] = self._prune_subtrees(dest_tree['tree'])
commit = await self._commit_tree_and_advance_branch(dest_tree, {'sha': dest_head},
commit_msg, dest_path.branch_ref)
if not is_copy:
src_tree['tree'] = self._remove_path_from_tree(src_tree['tree'], src_path)
src_tree['tree'] = self._prune_subtrees(src_tree['tree'])
await self._commit_tree_and_advance_branch(src_tree, {'sha': src_head},
commit_msg, src_path.branch_ref)
blobs = new_blobs # for the metadata
if dest_path.is_file:
assert len(blobs) == 1, 'Destination file should have exactly one candidate'
return GitHubFileTreeMetadata(
blobs[0], commit=commit, ref=dest_path.branch_ref
), not exists
folder = GitHubFolderTreeMetadata({
'path': dest_path.path.strip('/')
}, commit=commit, ref=dest_path.branch_ref)
folder.children = []
for item in blobs:
if item['path'] == dest_path.path.rstrip('/'):
continue
if item['type'] == 'tree':
folder.children.append(GitHubFolderTreeMetadata(item, ref=dest_path.branch_ref))
else:
folder.children.append(GitHubFileTreeMetadata(item, ref=dest_path.branch_ref))
return folder, not exists
async def _get_blobs_and_trees(self, branch_ref):
"""This method takes a branch ref (usually the branch name) to call the github api and
returns a flat list of a repo's blobs and trees (with no commits).
        :param str branch_ref: The reference which leads to the branch that the blobs and trees
            are gathered from.
        :returns dict response json: This is a JSON dict with the flattened list of blobs and trees
            included in the dict.
"""
resp = await self.make_request(
'GET',
self.build_repo_url('git', 'trees') + '/{}:?recursive=99999'.format(branch_ref),
expects=(200,)
)
return await resp.json()
async def _is_blob_in_tree(self, new_blob, path):
"""This method checks to see if a branch's tree already contains a blob with the same sha
and at the path provided, basically checking if a new blob has identical path and has
identical content to a blob already in the tree. This ensures we don't overwrite a blob if
it serves no purpose.
:param dict new_blob: a dict with data and metadata of the newly created blob which is not
yet committed.
:param GitHubPath path: The path where the newly created blob is to be committed.
:returns: bool: True if new_blob is in the tree, False if no blob or a different blob
exists at the path given
"""
blob_tree = await self._get_blobs_and_trees(path.branch_ref)
return any(new_blob['sha'] == blob['sha'] and
path.path == blob['path'] for blob in blob_tree['tree'])
async def _get_tree_and_head(self, branch):
"""Fetch the head commit and tree for the given branch.
:param str branch: The branch to fetch
:returns dict: A GitHub tree object. Contents are under the ``tree`` key.
:returns dict: A GitHub commit object. The SHA is under the ``sha`` key.
"""
branch_data = await self._fetch_branch(branch)
head = branch_data['commit']['sha']
tree_sha = branch_data['commit']['commit']['tree']['sha']
tree = await self._fetch_tree(tree_sha, recursive=True)
return tree, head
def _path_exists_in_tree(self, tree, path):
"""Search through a tree and return true if the given path is found.
:param list tree: A list of blobs in a git tree.
:param GitHubPath path: The path to search for.
:returns bool: true if ``path`` is found in ``tree``
"""
return any(x['path'] == path.path.rstrip('/') for x in tree)
def _remove_path_from_tree(self, tree, path):
"""Search through a tree and remove any blobs or trees that match ``path`` or are a child of
``path``.
:param list tree: A list of blobs in a git tree.
:param GitHubPath path: The path to exclude.
:returns list: A new list containing the filtered tree contents.
"""
return [
item
for item in tree
if (path.is_file and not item['path'] == path.path) or # file != path
(path.is_dir and not
(item['path'].startswith(path.path) or # file/folder != child of path
(item['type'] == 'tree' and item['path'] == path.path.rstrip('/')))) # folder != path
]
def _reparent_blobs(self, blobs, src_path, dest_path):
"""Take a list of blobs and replace the source path with the dest path.
Two caveats:
* This method operates on the list of blobs in place. This is intentional. Anything you pass
as the ``blobs`` arg will be mutated back in the calling scope.
* This method assumes that the list of blobs all begin with ``src_path``, since its purpose
          is to rewrite all the blobs found at or under ``src_path`` to be at or under ``dest_path``.
If you pass it something that is not located under ``src_path``, a later part of the path
may be updated.
:param list blobs: A list of blobs whose paths should be updated.
:param GitHubPath src_path: The original path.
:param GitHubPath dest_path: The new path.
:returns None: This methods returns **nothing**. It operates on the blobs in-place.
"""
for blob in blobs:
if blob['path'] == src_path.path.rstrip('/') and blob['type'] == 'tree':
                # Renaming the parent folder is not necessary. Trees are pruned before uploading
                # to GH. This is only here because at some point someone will use it without pruning
                # and wonder why on earth the parent folder isn't renamed.
blob['path'] = dest_path.path.rstrip('/')
else:
blob['path'] = blob['path'].replace(src_path.path, dest_path.path, 1)
return
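    # Rough illustration (paths are hypothetical): moving src_path 'docs/' to dest_path 'archive/'
    # rewrites a blob path of 'docs/a.txt' to 'archive/a.txt' via the replace() call above.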
def _prune_subtrees(self, tree):
"""Takes in a list representing a git tree and remove all the entries that are also trees.
Only blobs should remain. GitHub infers tree structure from blob paths. Deleting a blob
without removing its parent tree will result in the blob *NOT* being deleted. See:
http://www.levibotelho.com/development/commit-a-file-with-the-github-api/
:param list tree: A list representing a git tree. May contain trees, in addition to blobs.
:returns list: A new list containing just the blobs.
"""
return [item for item in tree if item['type'] != 'tree']
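    # Rough illustration (entries are hypothetical): given
    #   [{'path': 'foo', 'type': 'tree'}, {'path': 'foo/bar.txt', 'type': 'blob'}]
    # only {'path': 'foo/bar.txt', 'type': 'blob'} is kept; GitHub rebuilds the 'foo' tree from it.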
async def _commit_tree_and_advance_branch(self, old_tree, old_head, commit_msg, branch_ref):
"""Utilty method to bundle several commands into one. Takes a tree, head commit, a message,
and a branch, creates a new commit pointing to tree, then advances branch to point to the
new commit. Basically the same thing as ``git commit -am "foo message"`` on the command
line. Returns the new commit.
:param list old_tree: A list of blobs representing the new file tree.
        :param dict old_head: The commit object that will be the parent of the new commit. Must have a 'sha' key.
:param str commit_msg: The commit message for the new commit.
:param str branch_ref: The branch that will be advanced to the new commit.
:returns dict new_head: The commit object returned by GitHub.
"""
new_tree = await self._create_tree({'tree': old_tree['tree']})
# Create a new commit which references our top most tree change.
if new_tree['sha'] == old_tree['sha']: # prevents empty commits
return None
else:
new_head = await self._create_commit({
'tree': new_tree['sha'],
'parents': [old_head['sha']],
'committer': self.committer,
'message': commit_msg,
})
# Update repository reference, point to the newly created commit.
# No need to store data, rely on expects to raise exceptions
await self._update_ref(new_head['sha'], ref=branch_ref)
return new_head
|
py | 1a36c25c41e073acea41320420828d842f3702f5 | # Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from synapse.events import make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.federation.transport.client import StateRequestResponse
from synapse.logging.context import LoggingContext
from synapse.rest import admin
from synapse.rest.client import login, room
from tests import unittest
from tests.test_utils import event_injection, make_awaitable
class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
servlets = [
admin.register_servlets,
login.register_servlets,
room.register_servlets,
]
def make_homeserver(self, reactor, clock):
# mock out the federation transport client
self.mock_federation_transport_client = mock.Mock(
spec=["get_room_state_ids", "get_room_state", "get_event"]
)
return super().setup_test_homeserver(
federation_transport_client=self.mock_federation_transport_client
)
def test_process_pulled_event_with_missing_state(self) -> None:
"""Ensure that we correctly handle pulled events with lots of missing state
In this test, we pretend we are processing a "pulled" event (eg, via backfill
or get_missing_events). The pulled event has a prev_event we haven't previously
seen, so the server requests the state at that prev_event. There is a lot
of state we don't have, so we expect the server to make a /state request.
We check that the pulled event is correctly persisted, and that the state is
as we expect.
"""
return self._test_process_pulled_event_with_missing_state(False)
def test_process_pulled_event_with_missing_state_where_prev_is_outlier(
self,
) -> None:
"""Ensure that we correctly handle pulled events with lots of missing state
A slight modification to test_process_pulled_event_with_missing_state. Again
we have a "pulled" event which refers to a prev_event with lots of state,
but in this case we already have the prev_event (as an outlier, obviously -
if it were a regular event, we wouldn't need to request the state).
"""
return self._test_process_pulled_event_with_missing_state(True)
def _test_process_pulled_event_with_missing_state(
self, prev_exists_as_outlier: bool
) -> None:
OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
main_store = self.hs.get_datastores().main
state_storage_controller = self.hs.get_storage_controllers().state
# create the room
user_id = self.register_user("kermit", "test")
tok = self.login("kermit", "test")
room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
room_version = self.get_success(main_store.get_room_version(room_id))
# allow the remote user to send state events
self.helper.send_state(
room_id,
"m.room.power_levels",
{"events_default": 0, "state_default": 0},
tok=tok,
)
# add the remote user to the room
member_event = self.get_success(
event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join")
)
initial_state_map = self.get_success(
main_store.get_partial_current_state_ids(room_id)
)
auth_event_ids = [
initial_state_map[("m.room.create", "")],
initial_state_map[("m.room.power_levels", "")],
initial_state_map[("m.room.join_rules", "")],
member_event.event_id,
]
# mock up a load of state events which we are missing
state_events = [
make_event_from_dict(
self.add_hashes_and_signatures(
{
"type": "test_state_type",
"state_key": f"state_{i}",
"room_id": room_id,
"sender": OTHER_USER,
"prev_events": [member_event.event_id],
"auth_events": auth_event_ids,
"origin_server_ts": 1,
"depth": 10,
"content": {"body": f"state_{i}"},
}
),
room_version,
)
for i in range(1, 10)
]
# this is the state that we are going to claim is active at the prev_event.
state_at_prev_event = state_events + self.get_success(
main_store.get_events_as_list(initial_state_map.values())
)
# mock up a prev event.
# Depending on the test, we either persist this upfront (as an outlier),
# or let the server request it.
prev_event = make_event_from_dict(
self.add_hashes_and_signatures(
{
"type": "test_regular_type",
"room_id": room_id,
"sender": OTHER_USER,
"prev_events": [],
"auth_events": auth_event_ids,
"origin_server_ts": 1,
"depth": 11,
"content": {"body": "missing_prev"},
}
),
room_version,
)
if prev_exists_as_outlier:
prev_event.internal_metadata.outlier = True
persistence = self.hs.get_storage_controllers().persistence
self.get_success(
persistence.persist_event(
prev_event,
EventContext.for_outlier(self.hs.get_storage_controllers()),
)
)
else:
async def get_event(destination: str, event_id: str, timeout=None):
self.assertEqual(destination, self.OTHER_SERVER_NAME)
self.assertEqual(event_id, prev_event.event_id)
return {"pdus": [prev_event.get_pdu_json()]}
self.mock_federation_transport_client.get_event.side_effect = get_event
# mock up a regular event to pass into _process_pulled_event
pulled_event = make_event_from_dict(
self.add_hashes_and_signatures(
{
"type": "test_regular_type",
"room_id": room_id,
"sender": OTHER_USER,
"prev_events": [prev_event.event_id],
"auth_events": auth_event_ids,
"origin_server_ts": 1,
"depth": 12,
"content": {"body": "pulled"},
}
),
room_version,
)
# we expect an outbound request to /state_ids, so stub that out
self.mock_federation_transport_client.get_room_state_ids.return_value = (
make_awaitable(
{
"pdu_ids": [e.event_id for e in state_at_prev_event],
"auth_chain_ids": [],
}
)
)
# we also expect an outbound request to /state
self.mock_federation_transport_client.get_room_state.return_value = (
make_awaitable(
StateRequestResponse(auth_events=[], state=state_at_prev_event)
)
)
# we have to bump the clock a bit, to keep the retry logic in
# FederationClient.get_pdu happy
self.reactor.advance(60000)
# Finally, the call under test: send the pulled event into _process_pulled_event
with LoggingContext("test"):
self.get_success(
self.hs.get_federation_event_handler()._process_pulled_event(
self.OTHER_SERVER_NAME, pulled_event, backfilled=False
)
)
# check that the event is correctly persisted
persisted = self.get_success(main_store.get_event(pulled_event.event_id))
self.assertIsNotNone(persisted, "pulled event was not persisted at all")
self.assertFalse(
persisted.internal_metadata.is_outlier(), "pulled event was an outlier"
)
# check that the state at that event is as expected
state = self.get_success(
state_storage_controller.get_state_ids_for_event(pulled_event.event_id)
)
expected_state = {
(e.type, e.state_key): e.event_id for e in state_at_prev_event
}
self.assertEqual(state, expected_state)
if prev_exists_as_outlier:
self.mock_federation_transport_client.get_event.assert_not_called()
|
py | 1a36c27c749aff512c9734e2f222a5bd4cb45efe | import sys
import matplotlib.pyplot as plt
import numpy as np
# define a function reading the data from file
# File format: arbitrary number of pairs of lines
# 1st line: <key> = <float value> pairs, comma separated
# 2nd line: comma separated float values
# Here special keys: 'a','b','T'
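# A minimal sketch of the expected file layout (values below are made up):
#   a = 0.0, b = 1.0, T = 0.5
#   0.1, 0.4, 0.9, 0.4, 0.1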
def readdat(fname):
params = {} # dictionary for parameters
data = [] # list of arrays for data
# Open the data file
with open(fname) as f:
# Read next line of data file into a string variable
for line in f.readlines():
# Split string into comma-separated substrings and store them in a list
tmp = line.split(sep=",")
# Parameter line: contains '='
if "=" in line: # parameter line
tmp2 = [a.split(sep="=") for a in tmp]
params.update({k.strip() : v.strip() for k, v in tmp2})
else: # data line
try:
# Check whether data are numbers
float(tmp[0])
# Append list of numbers to list of data arrays
data += [[float(v) for v in tmp]]
except:
pass
return params, data
def plotfvsol(filename):
params, datas = readdat(filename)
print("Parameters: ",params)
# Extract relevant parameters
try:
a = float(params['a'])
b = float(params['b'])
T = float(params['T'])
    except:
        print("Missing parameters a, b, T!")
        return
# Ensure that [a,b] is an interval
if a>b:
a, b = b, a
# Plot data
fig, ax = plt.subplots() # Create a figure containing a single axis
for i, data in enumerate(datas):
print("|data[", i, "]| = ",len(data))
# Number of cell values
N = len(data)
h = (b-a)/N
x = np.linspace(a+h/2,b-h/2,N)
plt.plot(x,data,label=str('N={:d}'.format(N)),linewidth=1)
plt.title(filename + ': solution at T = ' + str(T));
plt.xlabel('x')
plt.ylabel('u(x,t)')
plt.legend()
    # Save the figure before showing it, so the saved file is not blank
    outfnname = filename.split(sep='.')
    plt.savefig(outfnname[0] + ".eps")
    print("Figure saved in " + outfnname[0] + ".eps")
    plt.show()
    plt.close()
if __name__ == "__main__":
filename = sys.argv[1]
print ("Reading data from ", filename)
plotfvsol(filename)
|
py | 1a36c2ac77c15363cf0ea0fa639fd95187cbcba2 | """
*******
GraphML
*******
Read and write graphs in GraphML format.
This implementation does not support mixed graphs (directed and undirected
edges together), hyperedges, nested graphs, or ports.
"GraphML is a comprehensive and easy-to-use file format for graphs. It
consists of a language core to describe the structural properties of a
graph and a flexible extension mechanism to add application-specific
data. Its main features include support of
* directed, undirected, and mixed graphs,
* hypergraphs,
* hierarchical graphs,
* graphical representations,
* references to external data,
* application-specific attribute data, and
* light-weight parsers.
Unlike many other file formats for graphs, GraphML does not use a
custom syntax. Instead, it is based on XML and hence ideally suited as
a common denominator for all kinds of services generating, archiving,
or processing graphs."
http://graphml.graphdrawing.org/
Format
------
GraphML is an XML format. See
http://graphml.graphdrawing.org/specification.html for the specification and
http://graphml.graphdrawing.org/primer/graphml-primer.html
for examples.
"""
__author__ = """\n""".join(['Salim Fadhley',
'Aric Hagberg ([email protected])'
])
__all__ = ['write_graphml', 'read_graphml', 'generate_graphml',
'GraphMLWriter', 'GraphMLReader']
import networkx as nx
from networkx.utils import _get_fh, make_str
import warnings
try:
from xml.etree.cElementTree import Element, ElementTree, tostring
except ImportError:
try:
from xml.etree.ElementTree import Element, ElementTree, tostring
except ImportError:
pass
def write_graphml(G, path, encoding='utf-8',prettyprint=True):
"""Write G in GraphML XML format to path
Parameters
----------
G : graph
A networkx graph
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G=nx.path_graph(4)
>>> nx.write_graphml(G, "test.graphml")
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
    edges together), hyperedges, nested graphs, or ports.
"""
fh = _get_fh(path, mode='wb')
writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint)
writer.add_graph_element(G)
writer.dump(fh)
def generate_graphml(G, encoding='utf-8',prettyprint=True):
"""Generate GraphML lines for G
Parameters
----------
G : graph
A networkx graph
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G=nx.path_graph(4)
>>> linefeed=chr(10) # linefeed=\n
>>> s=linefeed.join(nx.generate_graphml(G)) # a string
>>> for line in nx.generate_graphml(G): # doctest: +SKIP
... print(line)
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
    edges together), hyperedges, nested graphs, or ports.
"""
writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint)
writer.add_graph_element(G)
for line in str(writer).splitlines():
yield line
def read_graphml(path,node_type=str):
"""Read graph in GraphML format from path.
Parameters
----------
path : file or string
       File or filename to read.
       Filenames ending in .gz or .bz2 will be uncompressed.
node_type: Python type (default: str)
Convert node ids to this type
Returns
-------
graph: NetworkX graph
If no parallel edges are found a Graph or DiGraph is returned.
Otherwise a MultiGraph or MultiDiGraph is returned.
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
    edges together), hypergraphs, nested graphs, or ports.
    For multigraphs the GraphML edge "id" will be used as the edge
    key. If not specified then the "key" attribute will be used. If
    there is no "key" attribute a default NetworkX multigraph edge key
    will be provided.
    Files with the yEd "yfiles" extension can be read but the graphics
    information is discarded.
yEd compressed files ("file.graphmlz" extension) can be read by renaming
the file to "file.graphml.gz".
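    Examples
    --------
    Mirroring the ``write_graphml`` example above (an illustrative round trip):
    >>> G=nx.path_graph(4)
    >>> nx.write_graphml(G, "test.graphml")
    >>> H=nx.read_graphml("test.graphml")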
"""
fh=_get_fh(path,mode='rb')
reader = GraphMLReader(node_type=node_type)
# need to check for multiple graphs
glist=list(reader(fh))
return glist[0]
class GraphML(object):
NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
#xmlns:y="http://www.yworks.com/xml/graphml"
NS_Y = "http://www.yworks.com/xml/graphml"
SCHEMALOCATION = \
' '.join(['http://graphml.graphdrawing.org/xmlns',
'http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd'])
try:
chr(12345) # Fails on Py!=3.
unicode = str # Py3k's str is our unicode type
except ValueError:
pass
types=((str,"yfiles"),(str,"string"), (unicode,"string"),
(int,"int"), (int,"integer"), (float,"float"), (float,"double"),
(bool, "boolean"))
xml_type = dict(types)
python_type = dict(reversed(a) for a in types)
class GraphMLWriter(GraphML):
def __init__(self, graph=None, encoding="utf-8",prettyprint=True):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GraphML writer requires '
'xml.elementtree.ElementTree')
self.prettyprint=prettyprint
self.encoding = encoding
self.xml = Element("graphml",
{'xmlns':self.NS_GRAPHML,
'xmlns:xsi':self.NS_XSI,
'xsi:schemaLocation':self.SCHEMALOCATION}
)
self.keys={}
if graph is not None:
self.add_graph_element(graph)
def __str__(self):
if self.prettyprint:
self.indent(self.xml)
s=tostring(self.xml).decode(self.encoding)
return s
def get_key(self, name, attr_type, scope, default):
keys_key = (name, attr_type, scope)
try:
return self.keys[keys_key]
except KeyError:
new_id = "d%i" % len(list(self.keys))
self.keys[keys_key] = new_id
key_kwargs = {"id":new_id,
"for":scope,
"attr.name":name,
"attr.type":attr_type}
key_element=Element("key",**key_kwargs)
# add subelement for data default value if present
if default is not None:
default_element=Element("default")
default_element.text=make_str(default)
key_element.append(default_element)
self.xml.insert(0,key_element)
return new_id
def add_data(self, name, element_type, value,
scope="all",
default=None):
"""
Make a data element for an edge or a node. Keep a log of the
type in the keys table.
"""
if element_type not in self.xml_type:
raise nx.NetworkXError('GraphML writer does not support '
'dict types as data values.')
key_id = self.get_key(name, self.xml_type[element_type], scope, default)
data_element = Element("data", key=key_id)
data_element.text = make_str(value)
return data_element
def add_attributes(self, scope, xml_obj, data, default):
"""Appends attributes to edges or nodes.
"""
for k,v in data.items():
default_value=default.get(k)
obj=self.add_data(make_str(k), type(v), make_str(v),
scope=scope, default=default_value)
xml_obj.append(obj)
def add_nodes(self, G, graph_element):
for node,data in G.nodes_iter(data=True):
node_element = Element("node", id = make_str(node))
default=G.graph.get('node_default',{})
self.add_attributes("node", node_element, data, default)
graph_element.append(node_element)
def add_edges(self, G, graph_element):
if G.is_multigraph():
for u,v,key,data in G.edges_iter(data=True,keys=True):
edge_element = Element("edge",source=make_str(u),
target=make_str(v))
default=G.graph.get('edge_default',{})
self.add_attributes("edge", edge_element, data, default)
self.add_attributes("edge", edge_element,
{'key':key}, default)
graph_element.append(edge_element)
else:
for u,v,data in G.edges_iter(data=True):
edge_element = Element("edge",source=make_str(u),
target=make_str(v))
default=G.graph.get('edge_default',{})
self.add_attributes("edge", edge_element, data, default)
graph_element.append(edge_element)
def add_graph_element(self, G):
"""
Serialize graph G in GraphML to the stream.
"""
if G.is_directed():
default_edge_type='directed'
else:
default_edge_type='undirected'
graphid=G.graph.pop('id',None)
if graphid is None:
graph_element = Element("graph",
edgedefault = default_edge_type)
else:
graph_element = Element("graph",
edgedefault = default_edge_type,
id=graphid)
default={}
data=dict((k,v) for (k,v) in G.graph.items()
if k not in ['node_default','edge_default'])
self.add_attributes("graph", graph_element, data, default)
self.add_nodes(G,graph_element)
self.add_edges(G,graph_element)
self.xml.append(graph_element)
def add_graphs(self, graph_list):
"""
Add many graphs to this GraphML document.
"""
for G in graph_list:
self.add_graph_element(G)
def dump(self, stream):
if self.prettyprint:
self.indent(self.xml)
document = ElementTree(self.xml)
header='<?xml version="1.0" encoding="%s"?>'%self.encoding
stream.write(header.encode(self.encoding))
document.write(stream, encoding=self.encoding)
def indent(self, elem, level=0):
# in-place prettyprint formatter
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class GraphMLReader(GraphML):
"""Read a GraphML document. Produces NetworkX graph objects.
"""
def __init__(self, node_type=str):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GraphML reader requires '
'xml.elementtree.ElementTree')
self.node_type=node_type
self.multigraph=False # assume multigraph and test for parallel edges
def __call__(self, stream):
self.xml = ElementTree(file=stream)
(keys,defaults) = self.find_graphml_keys(self.xml)
for g in self.xml.findall("{%s}graph" % self.NS_GRAPHML):
yield self.make_graph(g, keys, defaults)
def make_graph(self, graph_xml, graphml_keys, defaults):
# set default graph type
edgedefault = graph_xml.get("edgedefault", None)
if edgedefault=='directed':
G=nx.MultiDiGraph()
else:
G=nx.MultiGraph()
# set defaults for graph attributes
for key_id,value in defaults.items():
key_for=graphml_keys[key_id]['for']
name=graphml_keys[key_id]['name']
python_type=graphml_keys[key_id]['type']
if key_for=='node':
G.graph['node_default']={name:python_type(value)}
if key_for=='edge':
G.graph['edge_default']={name:python_type(value)}
# hyperedges are not supported
hyperedge=graph_xml.find("{%s}hyperedge" % self.NS_GRAPHML)
if hyperedge is not None:
raise nx.NetworkXError("GraphML reader does not support hyperedges")
# add nodes
for node_xml in graph_xml.findall("{%s}node" % self.NS_GRAPHML):
self.add_node(G, node_xml, graphml_keys)
# add edges
for edge_xml in graph_xml.findall("{%s}edge" % self.NS_GRAPHML):
self.add_edge(G, edge_xml, graphml_keys)
# add graph data
data = self.decode_data_elements(graphml_keys, graph_xml)
G.graph.update(data)
# switch to Graph or DiGraph if no parallel edges were found.
if not self.multigraph:
if G.is_directed():
return nx.DiGraph(G)
else:
return nx.Graph(G)
else:
return G
def add_node(self, G, node_xml, graphml_keys):
"""Add a node to the graph.
"""
# warn on finding unsupported ports tag
ports=node_xml.find("{%s}port" % self.NS_GRAPHML)
if ports is not None:
warnings.warn("GraphML port tag not supported.")
# find the node by id and cast it to the appropriate type
node_id = self.node_type(node_xml.get("id"))
# get data/attributes for node
data = self.decode_data_elements(graphml_keys, node_xml)
G.add_node(node_id, data)
def add_edge(self, G, edge_element, graphml_keys):
"""Add an edge to the graph.
"""
# warn on finding unsupported ports tag
ports=edge_element.find("{%s}port" % self.NS_GRAPHML)
if ports is not None:
warnings.warn("GraphML port tag not supported.")
# raise error if we find mixed directed and undirected edges
directed = edge_element.get("directed")
if G.is_directed() and directed=='false':
raise nx.NetworkXError(\
"directed=false edge found in directed graph.")
if (not G.is_directed()) and directed=='true':
raise nx.NetworkXError(\
"directed=true edge found in undirected graph.")
source = self.node_type(edge_element.get("source"))
target = self.node_type(edge_element.get("target"))
data = self.decode_data_elements(graphml_keys, edge_element)
# GraphML stores edge ids as an attribute
# NetworkX uses them as keys in multigraphs too if no key
# attribute is specified
edge_id = edge_element.get("id")
if edge_id:
data["id"] = edge_id
if G.has_edge(source,target):
# mark this as a multigraph
self.multigraph=True
if edge_id is None:
# no id specified, try using 'key' attribute as id
edge_id=data.pop('key',None)
G.add_edge(source, target, key=edge_id, **data)
def decode_data_elements(self, graphml_keys, obj_xml):
"""Use the key information to decode the data XML if present."""
data = {}
for data_element in obj_xml.findall("{%s}data" % self.NS_GRAPHML):
key = data_element.get("key")
try:
data_name=graphml_keys[key]['name']
data_type=graphml_keys[key]['type']
except KeyError:
raise nx.NetworkXError("Bad GraphML data: no key %s"%key)
text=data_element.text
# assume anything with subelements is a yfiles extension
if text is not None and len(list(data_element))==0:
data[data_name] = data_type(text)
elif len(list(data_element)) > 0:
# Assume yfiles as subelements, try to extract node_label
node_label = data_element.find("{%s}ShapeNode/{%s}NodeLabel"%
(self.NS_Y, self.NS_Y))
if node_label is not None:
data['label'] = node_label.text
edge_label = data_element.find("{%s}PolyLineEdge/{%s}EdgeLabel"%
(self.NS_Y, (self.NS_Y)))
if edge_label is not None:
data['label'] = edge_label.text
return data
def find_graphml_keys(self, graph_element):
"""Extracts all the keys and key defaults from the xml.
"""
graphml_keys = {}
graphml_key_defaults = {}
for k in graph_element.findall("{%s}key" % self.NS_GRAPHML):
attr_id = k.get("id")
attr_type=k.get('attr.type')
attr_name=k.get("attr.name")
if attr_type is None:
attr_name=k.get('yfiles.type')
attr_type='yfiles'
if attr_name is None:
raise nx.NetworkXError("Unknown key type in file.")
graphml_keys[attr_id] = {
"name":attr_name,
"type":self.python_type[attr_type],
"for":k.get("for")}
# check for "default" subelement of key element
default=k.find("{%s}default" % self.NS_GRAPHML)
if default is not None:
graphml_key_defaults[attr_id]=default.text
return graphml_keys,graphml_key_defaults
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import xml.etree.ElementTree
except:
raise SkipTest("xml.etree.ElementTree not available")
# fixture for nose tests
def teardown_module(module):
import os
try:
os.unlink('test.graphml')
except:
pass
|
py | 1a36c5b277ff63ac945df7f95899119417095979 | import uvicorn
import os
from diskcache import Cache
from fastapi import FastAPI, File, UploadFile
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import FileResponse
from starlette.requests import Request
from src.helpers.milvus_helpers import MilvusHelper
from src.helpers.mysql_helpers import MySQLHelper
from src.config import UPLOAD_PATH
from src.operations.load import do_load
from src.operations.search import do_search
from src.operations.count import do_count
from src.operations.drop import do_drop
from src.config import TOP_K
from src.logs import LOGGER
from pydantic import BaseModel
from typing import Optional
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"])
MODEL = None
MILVUS_CLI = MilvusHelper()
MYSQL_CLI = MySQLHelper()
# Mkdir 'tmp/mol-data'
if not os.path.exists(UPLOAD_PATH):
os.makedirs(UPLOAD_PATH)
LOGGER.info("mkdir the path:{} ".format(UPLOAD_PATH))
@app.get('/data')
def mols_img(mols_path):
# Get the molecular image file
try:
LOGGER.info(("Successfully load molecular image: {}".format(mols_path)))
return FileResponse(UPLOAD_PATH + '/' + mols_path + '.png')
except Exception as e:
LOGGER.error("upload image error: {}".format(e))
return {'status': False, 'msg': e}, 400
@app.get('/progress')
def get_progress():
# Get the progress of dealing with data
try:
cache = Cache('./tmp')
return "current: {}, total: {}".format(cache['current'], cache['total'])
except Exception as e:
LOGGER.error("upload data error: {}".format(e))
return {'status': False, 'msg': e}, 400
class Item(BaseModel):
Table: Optional[str] = None
File: str
@app.post('/data/load')
async def load_data(item: Item):
# Insert all the data under the file path to Milvus/MySQL
try:
total_num = do_load(item.Table, item.File, MODEL, MILVUS_CLI, MYSQL_CLI)
LOGGER.info("Successfully loaded data, total count: {}".format(total_num))
return {'status': True, 'msg': "Successfully loaded data!"}
except Exception as e:
LOGGER.error(e)
return {'status': False, 'msg': e}, 400
class Item_search(BaseModel):
Table: Optional[str] = None
Mol: str
Num: Optional[int] = TOP_K
@app.post('/data/search')
async def search_data(request: Request, item: Item_search):
    # Search for molecules similar to the uploaded molecule in Milvus/MySQL
try:
# Save the upload data to server.
ids, paths, distances = do_search(item.Table, item.Mol, item.Num, MODEL, MILVUS_CLI, MYSQL_CLI)
host = request.headers['host']
for i in range(len(ids)):
tmp = "http://" + str(host) + "/data?mols_path=" + str(ids[i])
ids[i] = tmp
res = dict(zip(paths, zip(ids, distances)))
res = sorted(res.items(), key=lambda item: item[1][1])
LOGGER.info("Successfully searched similar data!")
return res
except Exception as e:
LOGGER.error(e)
return {'status': False, 'msg': e}, 400
@app.post('/data/count')
async def count_data(table_name: str = None):
# Returns the total number of data in the system
try:
num = do_count(table_name, MILVUS_CLI, MYSQL_CLI)
LOGGER.info("Successfully count the number of data!")
return num
except Exception as e:
LOGGER.error(e)
return {'status': False, 'msg': e}, 400
@app.post('/data/drop')
async def drop_tables(table_name: str = None):
# Delete the collection of Milvus and MySQL
try:
status = do_drop(table_name, MILVUS_CLI, MYSQL_CLI)
LOGGER.info("Successfully drop tables in Milvus and MySQL!")
return status
except Exception as e:
LOGGER.error(e)
return {'status': False, 'msg': e}, 400
if __name__ == '__main__':
uvicorn.run(app=app, host='0.0.0.0', port=5000)
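# Example requests once the server is running (illustrative only; the file path and
# SMILES string below are assumptions, not part of this project):
#   curl -X POST http://127.0.0.1:5000/data/load -H 'Content-Type: application/json' -d '{"File": "/data/test.smi"}'
#   curl -X POST http://127.0.0.1:5000/data/search -H 'Content-Type: application/json' -d '{"Mol": "CCO", "Num": 5}'
#   curl -X POST http://127.0.0.1:5000/data/count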
|
py | 1a36c5cf97889dfbe2a5da8efbd90747d38f24af | #!/usr/bin/env python3
"""
How to Run
-s keepItSimple -a 1 -a 2 -c -A -B
"""
__author__ = "Your Name"
__version__ = "0.1.0"
__license__ = "MIT"
import argparse
from logzero import logger
def main(args):
""" Main entry point of the app """
# logger.info("hello world")
# logger.info(args)
print( 'simple_value =', args.simple_value )
print( 'constant_value =', args.constant_value )
print( 'boolean_switch =', args.boolean_switch )
print( 'collection =', args.collection )
print( 'const_collection =', args.const_collection )
if __name__ == "__main__":
""" This is executed when run from the command line """
parser = argparse.ArgumentParser()
## Required positional argument
#parser.add_argument("arg", help="Required positional argument")
# Optional argument which requires a parameter (eg. -s keepItSimple)
parser.add_argument('-s', action='store', dest='simple_value',
help='Store a simple value')
parser.add_argument('-c', action='store_const', dest='constant_value',
const='value-to-store',
help='Store a constant value')
    # Optional argument flag that stores True into boolean_switch
parser.add_argument('-t', action='store_true', default=False,
dest='boolean_switch',
help='Set a switch to true')
    # Optional argument flag that stores False into boolean_switch
parser.add_argument('-f', action='store_false', default=False,
dest='boolean_switch',
help='Set a switch to false')
parser.add_argument('-a', action='append', dest='collection',
default=[],
help='Add repeated values to a list',
)
parser.add_argument('-A', action='append_const', dest='const_collection',
const='value-1-to-append',
default=[],
help='Add different values to list')
parser.add_argument('-B', action='append_const', dest='const_collection',
const='value-2-to-append',
help='Add different values to list')
# Optional verbosity counter (eg. -v, -vv, -vvv, etc.)
parser.add_argument(
"-v",
"--verbose",
action="count",
default=0,
help="Verbosity (-v, -vv, etc)")
# Specify output of "--version"
parser.add_argument(
"--version",
action="version",
version="%(prog)s (version {version})".format(version=__version__))
args = parser.parse_args()
main(args)
|
py | 1a36c666e3250a87e1b05d618e7e4d6e6416d2c2 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Two-qubit XX-rotation gate.
"""
from qiskit.circuit import Gate
from qiskit.circuit import QuantumCircuit
from qiskit.circuit import QuantumRegister
class RXXGate(Gate):
"""Two-qubit XX-rotation gate.
This gate corresponds to the rotation U(θ) = exp(-1j * θ * X⊗X / 2)
up to the phase exp(-1j * θ/2).
"""
def __init__(self, theta):
"""Create new rxx gate."""
super().__init__('rxx', 2, [theta])
def _define(self):
"""Calculate a subcircuit that implements this unitary."""
from qiskit.extensions.standard.x import CXGate
from qiskit.extensions.standard.u1 import U1Gate
from qiskit.extensions.standard.h import HGate
definition = []
q = QuantumRegister(2, 'q')
theta = self.params[0]
rule = [
(HGate(), [q[0]], []),
(HGate(), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U1Gate(theta), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(HGate(), [q[1]], []),
(HGate(), [q[0]], []),
]
for inst in rule:
definition.append(inst)
self.definition = definition
def inverse(self):
"""Invert this gate."""
return RXXGate(-self.params[0])
# NOTE: we should use the following as the canonical matrix
# definition but we don't include it yet since it differs from
# the circuit decomposition matrix by a global phase
# def to_matrix(self):
# """Return a Numpy.array for the RXX gate."""
# theta = float(self.params[0])
# return np.array([
# [np.cos(theta / 2), 0, 0, -1j * np.sin(theta / 2)],
# [0, np.cos(theta / 2), -1j * np.sin(theta / 2), 0],
# [0, -1j * np.sin(theta / 2), np.cos(theta / 2), 0],
# [-1j * np.sin(theta / 2), 0, 0, np.cos(theta / 2)]], dtype=complex)
def rxx(self, theta, qubit1, qubit2):
"""Apply RXX to circuit."""
return self.append(RXXGate(theta), [qubit1, qubit2], [])
QuantumCircuit.rxx = rxx
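# Minimal usage sketch (assumes this extension module is loaded, as it is by the
# standard Qiskit extensions of this version):
#   from qiskit import QuantumCircuit
#   qc = QuantumCircuit(2)
#   qc.rxx(0.5, 0, 1)  # XX-rotation by theta=0.5 between qubits 0 and 1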
|
py | 1a36c70922b7eab867b9a0d32d8ae39cc6f4b0d9 | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from .kmeans import MultiKMeans
from .kmeans import KMeans
from .kernels import PQDecodeCUDA
from .PQ import PQ
from .CustomModule import CustomModule
class MPQ(CustomModule):
def __init__(
self,
d_vector,
n_subvectors=8,
n_clusters=256,
distance="euclidean",
verbose=0,
n_codebooks=64,
):
super(MPQ, self).__init__()
assert d_vector % n_subvectors == 0
self.n_codebooks = n_codebooks
self.d_vector = d_vector
self.n_subvectors = n_subvectors
self.d_subvector = d_vector // n_subvectors
self.n_clusters = n_clusters
self.distance = distance
self.verbose = verbose
self.group_size=512
        # codebook shape: [n_codebooks, n_subvectors, d_subvector, n_clusters]
self.register_buffer("codebook", None)
self.kmeans = MultiKMeans(
n_clusters = n_clusters,
distance = distance,
max_iter = 25,
verbose = verbose,
)
self.codebook_selector = KMeans(
n_clusters = n_codebooks,
distance = distance,
max_iter = 25,
verbose = verbose,
)
self._decode_cuda = PQDecodeCUDA(tm=2, td=8)
def train(self, x):
"""
x: shape: [d_vector, n_data]
"""
labels = self.codebook_selector.fit(x)
# print("labels", labels.shape, labels.unique().shape )
unique_labels = labels.unique()
codebook = torch.zeros(
self.n_codebooks,
self.n_subvectors,
self.d_subvector,
self.n_clusters,
device=x.device,
dtype=torch.float32
)
for label in unique_labels:
mask = labels == label
sub_x = (
x[:, mask]
.reshape(self.n_subvectors, self.d_subvector, -1)
.contiguous()
)
self.kmeans.fit(sub_x)
codebook[label] = self.kmeans.centroids
del self.codebook
self.register_buffer("codebook", codebook)
def encode(self, x):
"""
returns code and codebook_index
x: shape: [d_vector, n_data]
"""
n_data = x.shape[1]
labels = self.codebook_selector.predict(x)
unique_labels, counts = labels.unique(return_counts=True)
n_unique = unique_labels.shape[0]
code = torch.zeros(self.n_subvectors, n_data, dtype=torch.uint8, device=self.codebook.device)
for i in range(n_unique):
label = unique_labels[i]
mask = labels == label
sub_x = (
x[:, mask]
.reshape(self.n_subvectors, self.d_subvector, -1)
.contiguous()
)
sub_codebook = self.codebook[label].contiguous()
_, sub_code = self.kmeans.get_labels(sub_x, sub_codebook)
code[:, mask] = sub_code.byte()
return (code, labels)
@staticmethod
def _decode_cpu(codebook, code):
"""
code: torch.Tensor, shape : [n_subvectors, n_data], dtype : uint8
return: torch.Tensor, shape : [d_vector, n_data], dtype : float32
"""
n_subvectors, n_data = code.shape
arange = torch.arange(n_subvectors)[:, None].expand(-1, n_data)
res = codebook[arange, :, code.long()]
res = res.transpose(1, 2).reshape(-1, n_data)
return res
def decode(self, code, codebook_index):
"""
returns reconstruction of code
code: [n_subvectors, n_data]
codebook_index: shape : [n_data], dtype : uint8
"""
n_data = code.shape[1]
unique_labels, counts = codebook_index.unique(return_counts=True)
recon = torch.zeros(
self.d_vector,
n_data,
device=self.codebook.device,
dtype=torch.float32,
)
for label in unique_labels:
mask = codebook_index == label
sub_code = code[:, mask].contiguous()
sub_codebook = self.codebook[label].contiguous()
if self.codebook.device.type == "cpu":
sub_recon = self._decode_cpu(sub_codebook, sub_code)
elif self.codebook.device.type == "cuda":
sub_recon = self._decode_cuda(sub_codebook, sub_code)
recon[:, mask] = sub_recon
return recon
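    # Rough usage sketch (shapes follow the docstrings above; variable names are illustrative):
    #   mpq = MPQ(d_vector=128)
    #   mpq.train(x)                       # x: [d_vector, n_data]
    #   code, labels = mpq.encode(x)       # code: [n_subvectors, n_data], labels: [n_data]
    #   recon = mpq.decode(code, labels)   # recon: [d_vector, n_data]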
def precompute_adc3(self, x, return_labels=False):
d_vector, n_data = x.shape
assert d_vector == self.d_vector
labels = self.codebook_selector.predict(x)
unique_labels, counts = labels.unique(return_counts=True)
n_unique = unique_labels.shape[0]
precomputed = torch.zeros(
self.n_subvectors,
n_data,
self.n_clusters,
device=self.codebook.device
)
mask = labels[:, None] == unique_labels[None]
xs = [ x[:, mask[:, i]].T for i in range(n_unique)]
lens = [i.shape[0] for i in xs]
padded_x = (
pad_sequence(xs, batch_first=True)
.transpose(-1, -2)
.reshape(n_unique * self.n_subvectors, self.d_subvector, -1)
)
codebook = (
self.codebook[unique_labels]
.reshape(n_unique * self.n_subvectors, self.d_subvector, self.n_clusters)
)
pcd = self.kmeans.sim(padded_x, codebook, normalize=False)
pcd = pcd.reshape(n_unique, self.n_subvectors, -1, self.n_clusters)
for i, label in enumerate(unique_labels):
sub_mask = mask[:, i]
precomputed[:, sub_mask] = pcd[i, :, :lens[i] ]
if return_labels:
return precomputed, labels
else:
return precomputed
def precompute_adc2(self, x, return_labels=False):
d_vector, n_data = x.shape
assert d_vector == self.d_vector
labels = self.codebook_selector.predict(x)
unique_labels, counts = labels.unique(return_counts=True)
precomputed = torch.zeros(
self.n_subvectors,
n_data,
self.n_clusters,
device=self.codebook.device
)
mask = labels[:, None] == unique_labels[None]
for i, label in enumerate(unique_labels):
sub_mask = mask[:, i]
sub_x = x[:, sub_mask]
sub_x = sub_x.reshape(self.n_subvectors, self.d_subvector, -1)
sub_codebook = self.codebook[label]
sub_precomputed = self.kmeans.sim(sub_x, sub_codebook, normalize=False)
precomputed[:, sub_mask] = sub_precomputed
if return_labels:
return precomputed, labels
else:
return precomputed
def precompute_adc(self, x, return_labels=False):
"""
x: shape : [d_vector, n_data]
"""
d_vector, n_data = x.shape
assert d_vector == self.d_vector
labels = self.codebook_selector.predict(x) #[n_data]
unique_labels, counts = labels.unique(return_counts=True)
groups = counts // self.group_size
unique_groups = groups.unique()
precomputed = torch.zeros(
self.n_subvectors,
n_data,
self.n_clusters,
device=self.codebook.device
)
for group_index in unique_groups:
group_unique_labels = unique_labels[groups == group_index]
n_gul = group_unique_labels.shape[0]
mask = labels[:, None] == group_unique_labels[None, :] #[n_data, n_gul]
mask2 = mask.sum(dim=1).bool() #[n_data]
sub_x = x[:, mask2]
sub_labels = labels[mask2]
sub_codebook = self.codebook[group_unique_labels] #[n_gul, n_subvectors, d_subvector, n_clusters]
sub_codebook = sub_codebook.reshape(-1, self.d_subvector, self.n_clusters)# [n_gul*n_subvectors, d_subvector, n_clusters]
padded_x = [sub_x[:, sub_labels == lab].T for lab in group_unique_labels]
del sub_x, sub_labels
len_x = [padded_x[i].shape[0] for i in range(n_gul)]
padded_x = (
pad_sequence(padded_x, batch_first=True) #[n_gul, max_n_sub_x, d_vector]
.transpose(-1, -2) #[n_gul, d_vector, max_n_sub_x]
.reshape(n_gul * self.n_subvectors, self.d_subvector, -1)
) #[n_gul* n_subvectors, d_subvector, max_n_sub_x]
sub_precomputed = self.kmeans.sim(padded_x, sub_codebook, normalize=False) #[n_gul*n_subvectors, max_n_sub_x, n_clusters]
del sub_codebook, padded_x
sub_precomputed = sub_precomputed.reshape(n_gul, self.n_subvectors, -1, self.n_clusters) #[n_gul,n_subvectors, max_n_sub_x, n_clusters]
for i in range(n_gul):
lab = group_unique_labels[i]
subsub_precomputed = sub_precomputed[i][:, :len_x[i]] #[n_subvectors, n_subsub_x, n_clusters]
sub_mask = mask[:, i]
precomputed[:, sub_mask] = subsub_precomputed
del sub_precomputed
if return_labels:
return precomputed, labels
else:
return precomputed |
py | 1a36c7e3af0000c16b119abd7844b17ae0e5d7a9 | import torch
import unittest
from super_gradients.training.datasets.data_augmentation import RandomErase
class RandomEraseTest(unittest.TestCase):
def test_random_erase(self):
dummy_input = torch.randn(1, 3, 32, 32)
one_erase = RandomErase(probability=0, value='1.')
self.assertEqual(one_erase.p, 0)
self.assertEqual(one_erase.value, 1.)
one_erase(dummy_input)
rndm_erase = RandomErase(probability=0, value='random')
self.assertEqual(rndm_erase.value, 'random')
rndm_erase(dummy_input)
if __name__ == '__main__':
unittest.main()
|
py | 1a36c7e7eb3d61a87d3a82db8ad5c1cf0d2f5d1c | from django.conf import settings
from django.core import checks, validators
from django.db import models
from django.utils.encoding import force_text
from django.utils.translation import gettext_lazy as _
from phonenumber_field import formfields
from phonenumber_field.phonenumber import PhoneNumber, to_python, validate_region
from phonenumber_field.validators import validate_international_phonenumber
import functools
class PhoneNumberDescriptor:
"""
The descriptor for the phone number attribute on the model instance.
Returns a PhoneNumber when accessed so you can do stuff like::
>>> instance.phone_number.as_international
Assigns a phone number object on assignment so you can do::
>>> instance.phone_number = PhoneNumber(...)
or,
>>> instance.phone_number = '+414204242'
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, owner):
if instance is None:
return self
# The instance dict contains whatever was originally assigned in
# __set__.
if self.field.name in instance.__dict__:
value = instance.__dict__[self.field.name]
else:
instance.refresh_from_db(fields=[self.field.name])
value = getattr(instance, self.field.name)
return value
def __set__(self, instance, value):
instance.__dict__[self.field.name] = to_python(value, region=self.field.region)
class PhoneNumberField(models.CharField):
attr_class = PhoneNumber
descriptor_class = PhoneNumberDescriptor
description = _("Phone number")
def __init__(self, *args, region=None, **kwargs):
kwargs.setdefault("max_length", 128)
super().__init__(*args, **kwargs)
self.region = region
self.validators.append(validators.MaxLengthValidator(self.max_length))
region_validator = functools.partial(validate_international_phonenumber, region=self.region)
self.validators.append(region_validator)
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_region())
return errors
def _check_region(self):
try:
validate_region(self.region)
except ValueError as e:
return [checks.Error(force_text(e), obj=self)]
return []
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
value = super().get_prep_value(value)
value = to_python(value, region=self.region)
if not isinstance(value, PhoneNumber):
return value
format_string = getattr(settings, 'PHONENUMBER_DB_FORMAT', 'E164')
fmt = PhoneNumber.format_map[format_string]
return value.format_as(fmt)
def contribute_to_class(self, cls, name, *args, **kwargs):
super().contribute_to_class(cls, name, *args, **kwargs)
setattr(cls, self.name, self.descriptor_class(self))
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
kwargs["region"] = self._region
return name, path, args, kwargs
def formfield(self, **kwargs):
defaults = {
"form_class": formfields.PhoneNumberField,
"region": self.region,
"error_messages": self.error_messages,
}
defaults.update(kwargs)
return super().formfield(**defaults)
|
py | 1a36c7f34e5f54dde8767ea93690e6808c50245d | import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Create a geodesic polygon.
polygon = ee.Geometry.Polygon([
[[-5, 40], [65, 40], [65, 60], [-5, 60], [-5, 60]]
])
# Compute a buffer of the polygon.
buffer = polygon.buffer(0.1)
# Compute the centroid of the polygon.
centroid = polygon.centroid()
Map.addLayer(buffer, {}, 'buffer')
Map.addLayer(centroid, {'color': 'red'}, 'centroid')
# Display the map.
Map
|
py | 1a36c86dc22be9831cdae6744ffbe4ef9189d085 | from cfn_kong_provider import KongProvider
import requests
request_schema = {
"type": "object",
"required": ["AdminURL", "Plugin"],
"properties": {
"AdminURL": {
"type": "string", "pattern": "^https?://.+",
"description": "of kong admin port"},
"JWT": {
"type": "object",
"required": ["PrivateKeyParameterName"],
"properties": {
"Issuer": {
"type": "string", "default": "admin",
"description": "iss attribute of the JWT token"
},
"PrivateKeyParameterName": {
"type": "string",
"description": "containing the RSA key in PEM encoding to sign the JWT token with"
}
}
},
"Plugin": {
"type": "object",
"required": ["name"],
"properties": {
"name": {
"type": "string"
},
"consumer": {
"type": "object",
"required": ["id"],
"properties": {
"id": {
"type": "string"
}
}
},
"service": {
"type": "object",
"required": ["id"],
"properties": {
"id": {
"type": "string"
}
}
},
"route": {
"type": "object",
"required": ["id"],
"properties": {
"id": {
"type": "string"
}
}
},
"config": {
"type": "object"
}
}
}
}
}
class KongPluginProvider(KongProvider):
def __init__(self):
super(KongPluginProvider, self).__init__('plugins', 'Plugin')
self.request_schema = request_schema
provider = KongPluginProvider()
def handler(request, context):
return provider.handle(request, context)
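# Hedged illustration of a CloudFormation custom resource this handler would
# accept (logical names, the lambda ARN reference and all ids below are
# invented; only the property names come from request_schema above):
#
#   RateLimitPlugin:
#     Type: Custom::KongPlugin
#     Properties:
#       ServiceToken: !GetAtt KongProviderFunction.Arn
#       AdminURL: https://kong-admin.example.com:8001
#       Plugin:
#         name: rate-limiting
#         service:
#           id: 0c61e164-6171-4091-923d-1213a4b58e7b
#         config:
#           minute: 60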
|
py | 1a36c8cb573b7becd6535df6cd4b932564b08fd2 | """Benchmark from Laurent Vaucher.
Source: https://github.com/slowfrog/hexiom : hexiom2.py, level36.txt
(Main function tweaked by Armin Rigo.)
"""
from __future__ import division, print_function
import time
from io import StringIO
import cython
##################################
class Dir(object):
def __init__(self, x, y):
self.x = x
self.y = y
DIRS = [ Dir(1, 0),
Dir(-1, 0),
Dir(0, 1),
Dir(0, -1),
Dir(1, 1),
Dir(-1, -1) ]
EMPTY = 7
##################################
class Done(object):
MIN_CHOICE_STRATEGY = 0
MAX_CHOICE_STRATEGY = 1
HIGHEST_VALUE_STRATEGY = 2
FIRST_STRATEGY = 3
MAX_NEIGHBORS_STRATEGY = 4
MIN_NEIGHBORS_STRATEGY = 5
def __init__(self, count, empty=False):
self.count = count
self.cells = None if empty else [[0, 1, 2, 3, 4, 5, 6, EMPTY] for i in range(count)]
def clone(self):
ret = Done(self.count, True)
ret.cells = [self.cells[i][:] for i in range(self.count)]
return ret
def __getitem__(self, i):
return self.cells[i]
def set_done(self, i, v):
self.cells[i] = [v]
def already_done(self, i):
return len(self.cells[i]) == 1
def remove(self, i, v):
if v in self.cells[i]:
self.cells[i].remove(v)
return True
else:
return False
def remove_all(self, v):
for i in range(self.count):
self.remove(i, v)
def remove_unfixed(self, v):
changed = False
for i in range(self.count):
if not self.already_done(i):
if self.remove(i, v):
changed = True
return changed
def filter_tiles(self, tiles):
for v in range(8):
if tiles[v] == 0:
self.remove_all(v)
@cython.locals(i=cython.int)
def next_cell_min_choice(self):
minlen = 10
mini = -1
for i in range(self.count):
if 1 < len(self.cells[i]) < minlen:
minlen = len(self.cells[i])
mini = i
return mini
@cython.locals(i=cython.int)
def next_cell_max_choice(self):
maxlen = 1
maxi = -1
for i in range(self.count):
if maxlen < len(self.cells[i]):
maxlen = len(self.cells[i])
maxi = i
return maxi
@cython.locals(i=cython.int)
def next_cell_highest_value(self):
maxval = -1
maxi = -1
for i in range(self.count):
if (not self.already_done(i)):
maxvali = max([k for k in self.cells[i] if k != EMPTY])
if maxval < maxvali:
maxval = maxvali
maxi = i
return maxi
@cython.locals(i=cython.int)
def next_cell_first(self):
for i in range(self.count):
if (not self.already_done(i)):
return i
return -1
@cython.locals(i=cython.int)
def next_cell_max_neighbors(self, pos):
maxn = -1
maxi = -1
for i in range(self.count):
if not self.already_done(i):
cells_around = pos.hex.get_by_id(i).links
n = sum([1 if (self.already_done(nid) and (self[nid][0] != EMPTY)) else 0
for nid in cells_around])
if n > maxn:
maxn = n
maxi = i
return maxi
@cython.locals(i=cython.int)
def next_cell_min_neighbors(self, pos):
minn = 7
mini = -1
for i in range(self.count):
if not self.already_done(i):
cells_around = pos.hex.get_by_id(i).links
n = sum([1 if (self.already_done(nid) and (self[nid][0] != EMPTY)) else 0
for nid in cells_around])
if n < minn:
minn = n
mini = i
return mini
def next_cell(self, pos, strategy=HIGHEST_VALUE_STRATEGY):
if strategy == Done.HIGHEST_VALUE_STRATEGY:
return self.next_cell_highest_value()
elif strategy == Done.MIN_CHOICE_STRATEGY:
return self.next_cell_min_choice()
elif strategy == Done.MAX_CHOICE_STRATEGY:
return self.next_cell_max_choice()
elif strategy == Done.FIRST_STRATEGY:
return self.next_cell_first()
elif strategy == Done.MAX_NEIGHBORS_STRATEGY:
return self.next_cell_max_neighbors(pos)
elif strategy == Done.MIN_NEIGHBORS_STRATEGY:
return self.next_cell_min_neighbors(pos)
else:
raise Exception("Wrong strategy: %d" % strategy)
##################################
class Node(object):
def __init__(self, pos, id, links):
self.pos = pos
self.id = id
self.links = links
##################################
class Hex(object):
@cython.locals(size=cython.int, id=cython.int, x=cython.int, y=cython.int)
def __init__(self, size):
self.size = size
self.count = 3 * size * (size - 1) + 1
self.nodes_by_id = self.count * [None]
self.nodes_by_pos = {}
id = 0
for y in range(size):
for x in range(size + y):
pos = (x, y)
node = Node(pos, id, [])
self.nodes_by_pos[pos] = node
self.nodes_by_id[node.id] = node
id += 1
for y in range(1, size):
for x in range(y, size * 2 - 1):
ry = size + y - 1
pos = (x, ry)
node = Node(pos, id, [])
self.nodes_by_pos[pos] = node
self.nodes_by_id[node.id] = node
id += 1
@cython.locals(dir=Dir, x=cython.int, y=cython.int, nx=cython.int, ny=cython.int, node=Node)
def link_nodes(self):
for node in self.nodes_by_id:
(x, y) = node.pos
for dir in DIRS:
nx = x + dir.x
ny = y + dir.y
if self.contains_pos((nx, ny)):
node.links.append(self.nodes_by_pos[(nx, ny)].id)
def contains_pos(self, pos):
return pos in self.nodes_by_pos
def get_by_pos(self, pos):
return self.nodes_by_pos[pos]
def get_by_id(self, id):
return self.nodes_by_id[id]
##################################
class Pos(object):
def __init__(self, hex, tiles, done = None):
self.hex = hex
self.tiles = tiles
self.done = Done(hex.count) if done is None else done
def clone(self):
return Pos(self.hex, self.tiles, self.done.clone())
##################################
@cython.locals(pos=Pos, i=cython.long, v=cython.int,
nid=cython.int, num=cython.int,
empties=cython.int, filled=cython.int,
vmax=cython.int, vmin=cython.int, cell=list, left=cython.int[8])
def constraint_pass(pos, last_move=None):
changed = False
left = pos.tiles[:]
done = pos.done
# Remove impossible values from free cells
free_cells = (range(done.count) if last_move is None
else pos.hex.get_by_id(last_move).links)
for i in free_cells:
if not done.already_done(i):
vmax = 0
vmin = 0
cells_around = pos.hex.get_by_id(i).links
for nid in cells_around:
if done.already_done(nid):
if done[nid][0] != EMPTY:
vmin += 1
vmax += 1
else:
vmax += 1
for num in range(7):
if (num < vmin) or (num > vmax):
if done.remove(i, num):
changed = True
# Computes how many of each value is still free
for cell in done.cells:
if len(cell) == 1:
left[cell[0]] -= 1
for v in range(8):
# If there is none, remove the possibility from all tiles
if (pos.tiles[v] > 0) and (left[v] == 0):
if done.remove_unfixed(v):
changed = True
else:
possible = sum([(1 if v in cell else 0) for cell in done.cells])
# If the number of possible cells for a value is exactly the number of available tiles
# put a tile in each cell
if pos.tiles[v] == possible:
for i in range(done.count):
cell = done.cells[i]
if (not done.already_done(i)) and (v in cell):
done.set_done(i, v)
changed = True
# Force empty or non-empty around filled cells
filled_cells = (range(done.count) if last_move is None
else [last_move])
for i in filled_cells:
if done.already_done(i):
num = done[i][0]
empties = 0
filled = 0
unknown = []
cells_around = pos.hex.get_by_id(i).links
for nid in cells_around:
if done.already_done(nid):
if done[nid][0] == EMPTY:
empties += 1
else:
filled += 1
else:
unknown.append(nid)
if len(unknown) > 0:
if num == filled:
for u in unknown:
if EMPTY in done[u]:
done.set_done(u, EMPTY)
changed = True
#else:
# raise Exception("Houston, we've got a problem")
elif num == filled + len(unknown):
for u in unknown:
if done.remove(u, EMPTY):
changed = True
return changed
ASCENDING = 1
DESCENDING = -1
def find_moves(pos, strategy, order):
done = pos.done
cell_id = done.next_cell(pos, strategy)
if cell_id < 0:
return []
if order == ASCENDING:
return [(cell_id, v) for v in done[cell_id]]
else:
# Try higher values first and EMPTY last
moves = list(reversed([(cell_id, v) for v in done[cell_id] if v != EMPTY]))
if EMPTY in done[cell_id]:
moves.append((cell_id, EMPTY))
return moves
def play_move(pos, move):
(cell_id, i) = move
pos.done.set_done(cell_id, i)
@cython.locals(x=cython.int, y=cython.int, ry=cython.int, id=cython.int)
def print_pos(pos, output):
hex = pos.hex
done = pos.done
size = hex.size
for y in range(size):
print(u" " * (size - y - 1), end=u"", file=output)
for x in range(size + y):
pos2 = (x, y)
id = hex.get_by_pos(pos2).id
if done.already_done(id):
c = str(done[id][0]) if done[id][0] != EMPTY else u"."
else:
c = u"?"
print(u"%s " % c, end=u"", file=output)
print(end=u"\n", file=output)
for y in range(1, size):
print(u" " * y, end=u"", file=output)
for x in range(y, size * 2 - 1):
ry = size + y - 1
pos2 = (x, ry)
id = hex.get_by_pos(pos2).id
if done.already_done(id):
c = str(done[id][0]) if done[id][0] != EMPTY else (u".")
else:
c = u"?"
print(u"%s " % c, end=u"", file=output)
print(end=u"\n", file=output)
OPEN = 0
SOLVED = 1
IMPOSSIBLE = -1
@cython.locals(i=cython.int, num=cython.int, nid=cython.int,
vmin=cython.int, vmax=cython.int, tiles=cython.int[8])
def solved(pos, output, verbose=False):
hex = pos.hex
tiles = pos.tiles[:]
done = pos.done
exact = True
all_done = True
for i in range(hex.count):
if len(done[i]) == 0:
return IMPOSSIBLE
elif done.already_done(i):
num = done[i][0]
tiles[num] -= 1
if (tiles[num] < 0):
return IMPOSSIBLE
vmax = 0
vmin = 0
if num != EMPTY:
cells_around = hex.get_by_id(i).links
for nid in cells_around:
if done.already_done(nid):
if done[nid][0] != EMPTY:
vmin += 1
vmax += 1
else:
vmax += 1
if (num < vmin) or (num > vmax):
return IMPOSSIBLE
if num != vmin:
exact = False
else:
all_done = False
if (not all_done) or (not exact):
return OPEN
print_pos(pos, output)
return SOLVED
@cython.locals(move=tuple)
def solve_step(prev, strategy, order, output, first=False):
if first:
pos = prev.clone()
while constraint_pass(pos):
pass
else:
pos = prev
moves = find_moves(pos, strategy, order)
if len(moves) == 0:
return solved(pos, output)
else:
for move in moves:
#print("Trying (%d, %d)" % (move[0], move[1]))
ret = OPEN
new_pos = pos.clone()
play_move(new_pos, move)
#print_pos(new_pos)
while constraint_pass(new_pos, move[0]):
pass
cur_status = solved(new_pos, output)
if cur_status != OPEN:
ret = cur_status
else:
ret = solve_step(new_pos, strategy, order, output)
if ret == SOLVED:
return SOLVED
return IMPOSSIBLE
@cython.locals(tot=cython.int, tiles=cython.int[8])
def check_valid(pos):
hex = pos.hex
tiles = pos.tiles
done = pos.done
# fill missing entries in tiles
tot = 0
for i in range(8):
if tiles[i] > 0:
tot += tiles[i]
else:
tiles[i] = 0
# check total
if tot != hex.count:
raise Exception("Invalid input. Expected %d tiles, got %d." % (hex.count, tot))
def solve(pos, strategy, order, output):
check_valid(pos)
return solve_step(pos, strategy, order, output, first=True)
# TODO Write an 'iterator' to go over all x,y positions
@cython.locals(x=cython.int, y=cython.int, p=cython.int, tiles=cython.int[8],
size=cython.int, inctile=cython.int, linei=cython.int)
def read_file(file):
lines = [line.strip("\r\n") for line in file.splitlines()]
size = int(lines[0])
hex = Hex(size)
linei = 1
tiles = 8 * [0]
done = Done(hex.count)
for y in range(size):
line = lines[linei][size - y - 1:]
p = 0
for x in range(size + y):
tile = line[p:p + 2]
p += 2
if tile[1] == ".":
inctile = EMPTY
else:
inctile = int(tile)
tiles[inctile] += 1
# Look for locked tiles
if tile[0] == "+":
print("Adding locked tile: %d at pos %d, %d, id=%d" %
(inctile, x, y, hex.get_by_pos((x, y)).id))
done.set_done(hex.get_by_pos((x, y)).id, inctile)
linei += 1
for y in range(1, size):
ry = size - 1 + y
line = lines[linei][y:]
p = 0
for x in range(y, size * 2 - 1):
tile = line[p:p + 2]
p += 2
if tile[1] == ".":
inctile = EMPTY
else:
inctile = int(tile)
tiles[inctile] += 1
# Look for locked tiles
if tile[0] == "+":
print("Adding locked tile: %d at pos %d, %d, id=%d" %
(inctile, x, ry, hex.get_by_pos((x, ry)).id))
done.set_done(hex.get_by_pos((x, ry)).id, inctile)
linei += 1
hex.link_nodes()
done.filter_tiles(tiles)
return Pos(hex, tiles, done)
def solve_file(file, strategy, order, output):
pos = read_file(file)
solve(pos, strategy, order, output)
def run_level36():
f = """\
4
2 1 1 2
3 3 3 . .
2 3 3 . 4 .
. 2 . 2 4 3 2
2 2 . . . 2
4 3 4 . .
3 2 3 3
"""
order = DESCENDING
strategy = Done.FIRST_STRATEGY
output = StringIO()
solve_file(f, strategy, order, output)
expected = """\
3 4 3 2
3 4 4 . 3
2 . . 3 4 3
2 . 1 . 3 . 2
3 3 . 2 . 2
3 . 2 . 2
2 2 . 1
"""
if output.getvalue() != expected:
raise AssertionError("got a wrong answer:\n%s" % output.getvalue())
def main(n):
# only run 1/25th of the requested number of iterations.
# with the default n=50 from runner.py, this means twice.
l = []
for i in range(n):
t0 = time.time()
run_level36()
time_elapsed = time.time() - t0
l.append(time_elapsed)
return l
if __name__ == "__main__":
import util, optparse
parser = optparse.OptionParser(
usage="%prog [options]",
description="Test the performance of the hexiom2 benchmark")
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, main)
|
py | 1a36c91fe4b1d915ebe965ce361cc3bd5cef0348 | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
## Data scale introduction
"""
import os
import traceback
from absl import logging
from delta.data.datasets.base_dataset import BaseDataSet
from delta.data.datasets.utils import mock_data
from delta.utils.register import registers
@registers.dataset.register('mock_text_nlu_joint_data')
class MockTextNLUJointData(BaseDataSet):
"""mock nlu-joint data class for nlu-joint task."""
def __init__(self, project_dir):
super().__init__(project_dir)
self.train_file = "train.txt"
self.dev_file = "dev.txt"
self.test_file = "test.txt"
self.data_files = [self.train_file, self.dev_file, self.test_file]
self.config_files = ['nlu_joint_mock.yml']
self.download_files = []
self.text_vocab = "text_vocab.txt"
# samples with label
self.samples = [
"0\tO O O O\tmy feeling is low",
"1\tO O O O B-ORG\ti am happy in the kfc"
]
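# Inferred sample format (an assumption based on the strings above):
# intent-label \t per-token slot tags in BIO scheme \t whitespace-tokenized text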
self.text_vocab_list = [
"<unk>\t0", "</s>\t1", "i\t2", "am\t3", "kfc\t4", "my\t5", "feeling\t6",
"happy\t7", "is\t8", "low\t9", "in\t10", "the\t11"
]
def download(self) -> bool:
return True
def after_download(self) -> bool:
try:
train_file_path = os.path.join(self.data_dir, self.train_file)
dev_file_path = os.path.join(self.data_dir, self.dev_file)
test_file_path = os.path.join(self.data_dir, self.test_file)
text_vocab_file = os.path.join(self.data_dir, self.text_vocab)
mock_data(self.samples, train_file_path, dev_file_path, test_file_path,
text_vocab_file, self.text_vocab_list)
except Exception as e:
logging.warning(traceback.format_exc())
return False
return True
|
py | 1a36c9dd841f19a9c04642e6d3765faa9481b178 |
import asyncio
from typing import Awaitable
from qtoggleserver import startup
def execute(main_code: Awaitable) -> None:
loop = asyncio.get_event_loop()
loop.run_until_complete(startup.init_loop())
loop.run_until_complete(startup.init())
try:
run_loop = loop.run_until_complete(main_code)
if run_loop:
loop.run_forever()
loop.run_until_complete(startup.cleanup())
finally:
try:
loop.run_until_complete(startup.cleanup_loop())
except asyncio.CancelledError:
pass # Ignore any cancelled errors
loop.close()
startup.logger.info('bye!')
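# A minimal, assumed usage sketch (the coroutine below is hypothetical; only
# execute() itself comes from this module):
#
#   async def main() -> bool:
#       # start servers / peripherals here
#       return True   # True keeps the event loop running until an external stop
#
#   execute(main())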
|
py | 1a36ca8a5fc154a075d137f3d944cab0335c57af | import os
import urllib
import urllib2
from PIL import Image
import io
def fix_colors(im):
colors_distribution = im.getcolors(1000)
ordered = sorted(colors_distribution, key=lambda x: x[0], reverse=True)
best_colors = [color[1] for color in ordered]
if (255, 255, 255) in best_colors:
best_colors.remove((255, 255, 255))
if (0, 0, 0) in best_colors:
best_colors.remove((0, 0, 0))
best_colors = best_colors[:2]
pixels = im.load()
for i in range(im.size[0]):
for j in range(im.size[1]):
color = pixels[i, j]
if color not in best_colors:
pixels[i, j] = best_colors[0]
return best_colors[0]
def black_and_white(im, filling):
black = (0, 0, 0)
white = (255, 255, 255)
pixels = im.load()
for i in range(im.size[0]):
for j in range(im.size[1]):
color = pixels[i, j]
if color == filling:
pixels[i, j] = white
else:
pixels[i, j] = black
# im.show()
def get_coords(im):
pixels = im.load()
black = (0, 0, 0)
xs = []
ys = []
for i in range(im.size[0]):
for j in range(im.size[1]):
color = pixels[i, j]
if color == black:
xs.append(i)
ys.append(j)
return min(xs), max(xs), min(ys), max(ys)
def test_configuration(im_pixels, start_x, start_y, symbol_len, symbol_h, symbol_pixels, symbol_x_min, symbol_y_min):
counter = 0
black = (0, 0, 0)
for i in range(symbol_len):
for j in range(symbol_h):
if im_pixels[start_x + i, start_y + j] == black:
if im_pixels[start_x + i, start_y + j] == symbol_pixels[symbol_x_min + i, symbol_y_min + j]:
counter += 1
else:
counter -= 1
elif symbol_pixels[symbol_x_min + i, symbol_y_min + j] == black:
counter -= 1
return counter
def get_matching(im_pixels, im, x_min, x_max, y_min, y_max, symbol, symbol_pixels, symbol_x_min, symbol_len,
symbol_y_min,
symbol_h):
results = []
for start_x in range(x_min - 1, x_max - symbol_len + 1):
for start_y in range(y_min - 1, y_max - symbol_h + 1):
if (start_x + symbol_len < im.size[0]) and (start_y + symbol_h < im.size[1]):
result = test_configuration(im_pixels, start_x, start_y, symbol_len, symbol_h, symbol_pixels,
symbol_x_min,
symbol_y_min)
results.append((result, start_x, start_y))
if len(results) == 0:
return 0, 0, 0
return max(results)
def is_to_remove(symbol_pixels, x, y):
black = (0, 0, 0)
result = False
for i in range(-1, 1):
for j in range(-1, 1):
result = result or symbol_pixels[x + i, y + j] == black
return result
def remove_used(picture_pixels, symbol, offset_x, offset_y, symbol_len, symbol_h):
white = (255, 255, 255)
symbol_x_min, _, symbol_y_min, _ = get_coords(symbol)
symbol_pixels = symbol.load()
for i in range(offset_x, offset_x + symbol_len + 1):
for j in range(offset_y, offset_y + symbol_h + 1):
if is_to_remove(symbol_pixels, symbol_x_min + i - offset_x, symbol_y_min + j - offset_y):
picture_pixels[i, j] = white
def find_letters(im, x_min, x_max, y_min, y_max, alphabet):
picture_pixels = im.load()
results = []
for i in range(6): # always 6 symbols
scores = []
for symbol, (symbol_image, (symbol_x_min, symbol_x_max, symbol_y_min, symbol_y_max)) in alphabet.items():
symbol_pixels = symbol_image.load()
symbol_len, symbol_h = symbol_x_max - symbol_x_min, symbol_y_max - symbol_y_min
best_score_for_symbol, offset_x, offset_y = get_matching(picture_pixels, im, x_min, x_max, y_min, y_max,
symbol,
symbol_pixels,
symbol_x_min, symbol_len, symbol_y_min,
symbol_h)
scores.append((best_score_for_symbol, symbol, offset_x, offset_y, symbol_len, symbol_h, symbol_image))
best, symbol, offset_x, offset_y, symbol_len, symbol_h, symbol_image = max(scores)
results.append((symbol, offset_x, best))
print(symbol, best)
remove_used(picture_pixels, symbol_image, offset_x, offset_y, symbol_len, symbol_h)
# im.show()
return results
def open_image(path):
im = Image.open(path)
im = im.convert('RGB')
return im
def get_solution(im):
filling = fix_colors(im)
black_and_white(im, filling)
x_min, x_max, y_min, y_max = get_coords(im)
results = find_letters(im, x_min, x_max, y_min, y_max, alphabet)
results = sorted(results, key=lambda x: x[1])
for symbol, position, score in results:
if score < 20:
return None
return "".join([x[0] for x in results])
alphabet = {file: open_image("../alphabet/" + file) for file in os.listdir("../alphabet")}
alphabet = {key[:-4]: (image, get_coords(image)) for key, image in alphabet.items()}
opener = urllib2.build_opener()
opener.addheaders.append(('Cookie', 'PHPSESSID=f0nvdes57f0s24afi8tdrscua4'))
urllib2.install_opener(opener)
while True:
try:
f = opener.open("http://10.13.37.10/captcha.php")
image_file = io.BytesIO(f.read())
imtmp = open_image(image_file)
im = open_image(image_file)
if im.size[0] > 0 and im.size[1] > 0:
# im.show()
res = get_solution(im)
if res is not None:
print(res)
params = {'captcha': res}
encoded_params = urllib.urlencode(params)
f = opener.open("http://10.13.37.10/", encoded_params)
webpage = f.read()
print(webpage)
if "didn't" in webpage:
imtmp.show()
else:
print "skipping due to a low score"
except:
pass
|
py | 1a36cb62557741a49a32a55ef0e272e320cfebf5 | # -*- coding: utf-8 -*-
import cv2
import os
import numpy as np
import sys
print("python version : {version}".format(version=sys.version))
'''
Requirements
1. Image Load & Search in directory
- print the current folder : done
- list the images in the current folder (keep the extension list extensible) : done
- print the number of images in the current folder : done
- entering a number opens the corresponding image file in the current folder : done
2. Image Value read & View
- display in RGB : done
- display in gray : done
- a button to switch rgb <-> gray?
3. Auto window size, editing window size
- autosize flag : done
- edit window size : done
- enlarge the image automatically when the window is resized
4. zooming + moving
- zooming
- moving
5. bbox size, position check
Extras
6. crop : done
7. refactor into a class
'''
#1
def show_image(img, show_flag='color', size_flag='auto'):
flag = show_type(show_flag)
image = cv2.imread(img, flag)
size_flag = window_size(size_flag)
cv2.namedWindow('image', size_flag)
cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def crop_drag(event, x, y, flags, param):
global refPt, cropping, i
if event == cv2.EVENT_MOUSEMOVE:
print('event_mousemove!!!')
# if cropping == False:
# temp = image.copy()
# print(refPt)
# cv2.rectangle(temp, refPt[0], refPt[1], (0, 255, 0), 2)
# cv2.imshow('temp', temp)
def crop_image(img):
global image
image = cv2.imread(img)
clone = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", click_crop)
# cv2.setMouseCallback("image", crop_drag)
while True:
cv2.imshow('image', image)
key = cv2.waitKey(1) & 0xFF
if key == ord("r"):
image = clone.copy()
elif key == ord("c"):
break
if len(refPt) == 2:
crop = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
cv2.imshow("crop", crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
def click_crop(event, x, y, flags, param):
global refPt, cropping
if event == cv2.EVENT_LBUTTONDOWN:
refPt = [(x, y)]
cropping = True
elif event == cv2.EVENT_LBUTTONUP:
refPt.append((x, y))
cropping = False
cv2.rectangle(image, refPt[0], refPt[1], (0, 255, 0), 2)
cv2.imshow("image", image)
def show_type(style):
if style == 'color':
return 1
elif style == 'gray':
return 0
else:
return -1
def window_size(size_flag):
if size_flag == 'auto':
return cv2.WINDOW_AUTOSIZE
elif size_flag == 'normal':
return cv2.WINDOW_NORMAL
elif size_flag == 'full':
return cv2.WINDOW_FULLSCREEN
def zoom(img):
'''
Zoom in by a factor of 2.
'''
image = cv2.imread(img)
zoom = cv2.resize(image, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
cv2.imshow('zoom', zoom)
cv2.waitKey(0)
cv2.destroyAllWindows()
def bbox(img):
image = cv2.imread(img)
r = cv2.selectROI(image)
crop = image[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
cv2.imshow("Image", crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
def view_move(image_file, now_order, len_image):
'''
Fetch the list of images in the current folder and attach a mouse callback.
1. The current folder's file list is received from the caller.
image_file : list of image paths
now_order : index of the current image
len_image : number of images
:return:
'''
image = cv2.imread(image_file[int(now_order)])
cv2.imshow("viewer", image)
now_order = int(now_order)
while True:
key = cv2.waitKey(1) & 0xFF
cv2.namedWindow("viewer")
if key == 27:
quit()
elif key == 2:
if now_order <= 0:
now_order = now_order + len_image - 1
else:
now_order -= 1
image_path = image_file[now_order]
print(image_path)
image = cv2.imread(image_path)
cv2.imshow("viewer", image)
elif key == 3:
if now_order+1 >= len_image:
now_order = now_order - len_image + 1
else:
now_order += 1
image_path = image_file[now_order]
print(image_path)
image = cv2.imread(image_path)
cv2.imshow("viewer", image)
def drag_zoom(event, x, y, flags, param):
global refPt, cropping, direction
if event == cv2.EVENT_LBUTTONDOWN:
refPt = [(x, y)]
print('aa')
cropping = True
# elif event == cv2.EVENT_MOUSEMOVE:
# if cropping == True:
# direction.append((x, y))
# print(direction)
elif event == cv2.EVENT_LBUTTONUP:
refPt.append((x, y))
cropping = False
print('bb')
cv2.rectangle(image, refPt[0], refPt[1], (0, 255, 0), 2)
print(refPt[0], refPt[1])
cv2.imshow("image", image)
def drag_zoom_viewer(img):
'''
Zoom by dragging.
img : the original image
copy_img : just a copy taken from the original image
mouse
Record the coordinates the mouse drags over -> build and display a view of that region
:return:
'''
global image, copy_img
image = cv2.imread(img)
y, x = image.shape[:2]
copy_img = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", drag_zoom)
print('x, y', x, y)
while True:
cv2.imshow('image', image)
key = cv2.waitKey(1) & 0xFF
if key == ord("r"):
image = copy_img.copy()
print('b')
elif key == ord("c"):
print('c')
if len(refPt) == 2:
copy = copy_img[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
# TODO: derive fx, fy from the ratio of the original size to the cropped size
cv2.resize(copy, (x, y), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
cv2.imshow("image", copy)
print('d')
cv2.waitKey(0)
cv2.destroyAllWindows()
print(refPt)
# if len(refPt) == 2:
# copy = copy_img[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
# # TODO: derive fx, fy from the ratio of the original size to the cropped size
# cv2.resize(copy, (x, y), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
# cv2.imshow("image", copy)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
refPt = []
# direction = []
cropping = False
image_extension = ('jpg', 'jpeg', 'png') # image extensions
current_folder = os.getcwd() # current folder
print(current_folder)
image_file = [i for i in os.listdir(current_folder) if i.endswith(image_extension)==True]
print("current folder path : {current_folder}\nimage 개수 : {len_image}\nimage file : {image_file}".format(
current_folder=current_folder, len_image=len(image_file), image_file=image_file
))
input = raw_input("Which image number would you like to view?\n")
now_order = int(input)-1
try:
selected_image = image_file[int(input)-1]
print(selected_image)
except IndexError:
print("1부터 {n}까지의 숫자를 입력해주세요".format(n=len(image_file)))
finally:
if int(input)<=0:
print("양수를 입력해주세요")
# show_image(selected_image)
# zoom(selected_image)
# crop_image(selected_image)
# bbox(selected_image)
# view_move(image_file, now_order, len(image_file))
drag_zoom_viewer(selected_image)
|
py | 1a36cba75b4261cc3ddbeac70de948bc983b56c3 | # Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging.config
import itertools
import platform
import struct
import warnings
import os
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
try:
ElementTreeParseError = getattr(ElementTree, 'ParseError')
except AttributeError:
ElementTreeParseError = getattr(ElementTree, 'XMLParserError')
from unicodedata import east_asian_width
from ..lib import six
PY27 = six.PY2 and sys.version_info[1] == 7
PYPY = platform.python_implementation().lower() == 'pypy'
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Definition of East Asian Width
# http://unicode.org/reports/tr11/
# Ambiguous width can be changed by option
_EAW_MAP = {'Na': 1, 'N': 1, 'W': 2, 'F': 2, 'H': 1}
import decimal
DECIMAL_TYPES = [decimal.Decimal, ]
import json # don't remove
if six.PY3:
lrange = lambda *x: list(range(*x))
lzip = lambda *x: list(zip(*x))
lkeys = lambda x: list(x.keys())
lvalues = lambda x: list(x.values())
litems = lambda x: list(x.items())
lmap = lambda *x: list(map(*x))
irange = range
izip = zip
long_type = int
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
if PY27:
from ..lib import enum
else:
import enum
if PY27:
try:
import cdecimal as decimal
DECIMAL_TYPES.append(decimal.Decimal)
except ImportError:
import decimal
else:
import decimal
from collections import OrderedDict
OrderedDict3 = OrderedDict
def u(s):
return s
def strlen(data, encoding=None):
# encoding is for compat with PY2
return len(data)
def east_asian_len(data, encoding=None, ambiguous_width=1):
"""
Calculate display width considering unicode East Asian Width
"""
if isinstance(data, six.text_type):
return sum([_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data])
else:
return len(data)
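# Example (following _EAW_MAP above): east_asian_len(u'リスト') returns 6 while
# len(u'リスト') is 3, since each katakana character has East Asian Width 'W'
# and therefore counts as two display columns.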
dictconfig = lambda config: logging.config.dictConfig(config)
import builtins
from concurrent import futures # don't remove
from datetime import timedelta
total_seconds = timedelta.total_seconds
import functools as functools32
def np_getbuffer(n):
return memoryview(n)
BrokenPipeError = BrokenPipeError
ConnectionResetError = ConnectionResetError
TimeoutError = TimeoutError
from itertools import accumulate
else:
lrange = range
lzip = zip
lkeys = lambda x: x.keys()
lvalues = lambda x: x.values()
litems = lambda x: x.items()
lmap = map
irange = xrange # noqa F821
izip = itertools.izip
long_type = long # noqa F821
from ..lib import enum
try:
import cdecimal as decimal
DECIMAL_TYPES.append(decimal.Decimal)
except ImportError:
import decimal
try:
import cStringIO as StringIO
except ImportError:
import StringIO
StringIO = BytesIO = StringIO.StringIO
def u(s):
return unicode(s, "unicode_escape") # noqa F821
def strlen(data, encoding=None):
try:
data = data.decode(encoding)
except UnicodeError:
pass
return len(data)
def east_asian_len(data, encoding=None, ambiguous_width=1):
"""
Calculate display width considering unicode East Asian Width
"""
if isinstance(data, six.text_type):
try:
data = data.decode(encoding)
except UnicodeError:
pass
return sum([_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data])
else:
return len(data)
from collections import OrderedDict
dictconfig = lambda config: logging.config.dictConfig(config)
from datetime import timedelta
total_seconds = timedelta.total_seconds
import __builtin__ as builtins # don't remove
from ..lib import futures # don't remove
from ..lib.functools32.functools32 import OrderedDict as OrderedDict3
from ..lib import functools32 # don't remove
def np_getbuffer(n):
import numpy as np
return np.getbuffer(n)
class TimeoutError(Exception):
pass
class BrokenPipeError(Exception):
pass
class ConnectionResetError(Exception):
pass
def accumulate(iterable, func=lambda a, b: a + b):
'Return running totals'
# accumulate([1,2,3,4,5]) --> 1 3 6 10 15
# accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
it = iter(iterable)
try:
total = next(it)
except StopIteration:
return
yield total
for element in it:
total = func(total, element)
yield total
if six.PY3:
from contextlib import suppress
else:
from contextlib import contextmanager
@contextmanager
def suppress(*exceptions):
try:
yield
except exceptions:
pass
Enum = enum.Enum
DECIMAL_TYPES = tuple(DECIMAL_TYPES)
Decimal = decimal.Decimal
if sys.version_info.major < 3:
# Due to a bug in python 2.7 Queue.get, if a timeout isn't specified then
# `Queue.get` can't be interrupted. A workaround is to specify an extremely
# long timeout, which then allows it to be interrupted.
# For more information see: https://bugs.python.org/issue1360
def queue_get(q):
return q.get(block=True, timeout=(365 * 24 * 60 * 60))
elif os.name == 'nt':
# Python 3 windows Queue.get also doesn't handle interrupts properly. To
# workaround this we poll at a sufficiently large interval that it
# shouldn't affect performance, but small enough that users trying to kill
# an application shouldn't care.
def queue_get(q):
while True:
try:
return q.get(block=True, timeout=0.1)
except Empty:
pass
else:
def queue_get(q):
return q.get()
from ..lib.lib_utils import isvalidattr, dir2, raise_exc, getargspec, getfullargspec
from ..lib.six.moves import reduce, zip_longest
from ..lib.six.moves import reload_module
from ..lib.six.moves.queue import Queue, Empty, PriorityQueue
from ..lib.six.moves.urllib.request import urlretrieve
from ..lib.six.moves import cPickle as pickle
from ..lib.six.moves.urllib.parse import urlencode, urlparse, unquote, quote, quote_plus, parse_qsl
from ..lib.six.moves import configparser as ConfigParser
try:
import pytz
utc = pytz.utc
FixedOffset = pytz._FixedOffset
except ImportError:
import datetime
_ZERO_TIMEDELTA = datetime.timedelta(0)
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(datetime.tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name=None):
self.__offset = datetime.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return _ZERO_TIMEDELTA
utc = FixedOffset(0, 'UTC')
try:
from weakref import finalize
except ImportError:
# Backported from Python 3.6
import itertools
from weakref import ref
class finalize:
"""Class for finalization of weakrefable objects
finalize(obj, func, *args, **kwargs) returns a callable finalizer
object which will be called when obj is garbage collected. The
first time the finalizer is called it evaluates func(*arg, **kwargs)
and returns the result. After this the finalizer is dead, and
calling it just returns None.
When the program exits any remaining finalizers for which the
atexit attribute is true will be run in reverse order of creation.
By default atexit is true.
"""
# Finalizer objects don't have any state of their own. They are
# just used as keys to lookup _Info objects in the registry. This
# ensures that they cannot be part of a ref-cycle.
__slots__ = ()
_registry = {}
_shutdown = False
_index_iter = itertools.count()
_dirty = False
_registered_with_atexit = False
class _Info:
__slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index")
def __init__(self, obj, func, *args, **kwargs):
if not self._registered_with_atexit:
# We may register the exit function more than once because
# of a thread race, but that is harmless
import atexit
atexit.register(self._exitfunc)
finalize._registered_with_atexit = True
info = self._Info()
info.weakref = ref(obj, self)
info.func = func
info.args = args
info.kwargs = kwargs or None
info.atexit = True
info.index = next(self._index_iter)
self._registry[self] = info
finalize._dirty = True
def __call__(self, _=None):
"""If alive then mark as dead and return func(*args, **kwargs);
otherwise return None"""
info = self._registry.pop(self, None)
if info and not self._shutdown:
return info.func(*info.args, **(info.kwargs or {}))
def detach(self):
"""If alive then mark as dead and return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None and self._registry.pop(self, None):
return (obj, info.func, info.args, info.kwargs or {})
def peek(self):
"""If alive then return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None:
return (obj, info.func, info.args, info.kwargs or {})
@property
def alive(self):
"""Whether finalizer is alive"""
return self in self._registry
@property
def atexit(self):
"""Whether finalizer should be called at exit"""
info = self._registry.get(self)
return bool(info) and info.atexit
@atexit.setter
def atexit(self, value):
info = self._registry.get(self)
if info:
info.atexit = bool(value)
def __repr__(self):
info = self._registry.get(self)
obj = info and info.weakref()
if obj is None:
return '<%s object at %#x; dead>' % (type(self).__name__, id(self))
else:
return '<%s object at %#x; for %r at %#x>' % \
(type(self).__name__, id(self), type(obj).__name__, id(obj))
@classmethod
def _select_for_exit(cls):
# Return live finalizers marked for exit, oldest first
L = [(f,i) for (f,i) in cls._registry.items() if i.atexit]
L.sort(key=lambda item:item[1].index)
return [f for (f,i) in L]
@classmethod
def _exitfunc(cls):
# At shutdown invoke finalizers for which atexit is true.
# This is called once all other non-daemonic threads have been
# joined.
reenable_gc = False
try:
if cls._registry:
import gc
if gc.isenabled():
reenable_gc = True
gc.disable()
pending = None
while True:
if pending is None or finalize._dirty:
pending = cls._select_for_exit()
finalize._dirty = False
if not pending:
break
f = pending.pop()
try:
# gc is disabled, so (assuming no daemonic
# threads) the following is the only line in
# this function which might trigger creation
# of a new finalizer
f()
except Exception:
sys.excepthook(*sys.exc_info())
assert f not in cls._registry
finally:
# prevent any more finalizers from executing during shutdown
finalize._shutdown = True
if reenable_gc:
gc.enable()
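# Usage sketch for finalize() (whether backported above or imported from
# weakref), mirroring the standard library example; the TempDir class is
# illustrative only:
#
#   import shutil, tempfile
#
#   class TempDir(object):
#       def __init__(self):
#           self.name = tempfile.mkdtemp()
#           self._finalizer = finalize(self, shutil.rmtree, self.name)
#
#   # shutil.rmtree(self.name) runs exactly once, either when the TempDir
#   # instance is garbage collected or at interpreter exit (atexit defaults to True).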
__all__ = ['sys', 'builtins', 'logging.config', 'OrderedDict', 'dictconfig', 'suppress',
'reduce', 'reload_module', 'Queue', 'PriorityQueue', 'Empty', 'ElementTree', 'ElementTreeParseError',
'urlretrieve', 'pickle', 'urlencode', 'urlparse', 'unquote', 'quote', 'quote_plus', 'parse_qsl',
'Enum', 'ConfigParser', 'decimal', 'Decimal', 'DECIMAL_TYPES', 'FixedOffset', 'utc', 'finalize',
'functools32', 'zip_longest', 'OrderedDict3', 'BrokenPipeError', 'TimeoutError', 'ConnectionResetError',
'izip', 'accumulate']
|
py | 1a36cc276269bf5f2deff8858c529109413d870e | #!/usr/bin/env python3
# coding:utf-8
"""
__title__ = ''
__author__ = 'David Ao'
__mtime__ = '2018/6/26'
#
"""
import json
import os
import pandas as pd
import numpy as np
from tensorflow.contrib import learn
from sklearn.metrics import classification_report, f1_score
from tools.utility import Utility
from works.test.base.estimate_base import EstimateBase
class FineGrainedSentimentInfer(EstimateBase):
def __init__(self):
super().__init__()
self.load_config('aspect_pro')
self.reload_model()
self.log = Utility.get_logger('aspect_pro')
self.content_vocab_processor = learn.preprocessing.VocabularyProcessor.restore(self.config['vocab_pkl'])
def batch_iter_test(self, data, batch_size):
"""
:param data:
:param batch_size:
:return:
"""
assert isinstance(data, pd.core.frame.DataFrame), 'test data should be a DataFrame'
content = data.content_token.values.tolist()
data = np.array(list(self.content_vocab_processor.transform(content))).tolist()
return super().batch_iter_test(data, batch_size)
def estimate(self, df):
"""
Run inference.
:param df:
:return:
"""
batches = self.batch_iter_test(df, self.test_batch_size)
# predict
all_predictions = []
for x_test_batch in batches:
batch_predictions = self.sess.run(self.predictions, {self.input_x: x_test_batch, self.dropout_keep_prob: 1.0})
all_predictions.append(batch_predictions.T)
all_predictions = np.concatenate(all_predictions)
df['y'] = all_predictions.tolist()
return df
def run(self):
"""
:return:
"""
self.run_test()
def test(self, df, out_path, with_pre=False):
"""
:param df:
:param out_path:
:param with_pre:
:return:
"""
# df = df.head(10)
if df is None or len(df) == 0:
self.log.info('parse_dataset is empty')
return
if 'y' in df.columns:
df.rename(columns={'y': 'y_'}, inplace=True)
df = self.estimate(df)
if df is None or len(df) == 0:
self.log.info('estimate result is empty')
return
if 'y_' not in df.columns:
# test data without labels
return self.test_no_label(df, out_path)
def process(row):
lab = eval(row['y_'])
cl = []
for i in range(0, 80, 4):
lt = lab[i: i + 4]
cl.append(np.argmax(lt))
row['label'] = cl
return row
df = df.apply(process, axis=1)
y_pre = df.y
y_pre = np.array(y_pre.tolist())
y_true = df.label
y_true = np.array(y_true.tolist())
f_scores = []
for i in range(20):
f = f1_score(y_true[:, i], y_pre[:, i], average='macro')
f_scores.append(f)
self.log.info('f1 score : {}'.format(f_scores))
f_avg = np.array(f_scores).mean()
self.log.info('mean f1 score: {}'.format(f_avg))
df.to_csv(out_path, index=False, encoding='utf-8')
def test_no_label(self, ret_df, out_path):
"""
Evaluate on data without labels.
:param ret_df:
:return:
"""
aspect = ['location_traffic_convenience',
'location_distance_from_business_district', 'location_easy_to_find',
'service_wait_time', 'service_waiters_attitude',
'service_parking_convenience', 'service_serving_speed', 'price_level',
'price_cost_effective', 'price_discount', 'environment_decoration',
'environment_noise', 'environment_space', 'environment_cleaness',
'dish_portion', 'dish_taste', 'dish_look', 'dish_recommendation',
'others_overall_experience', 'others_willing_to_consume_again']
lab_dict = {
0: 0,
1: 1,
2: -2,
3: -1
}
df_ret = ret_df[['id', 'content', 'y']]
def process(row):
# y = eval(row['y'])
y = row['y']
for i, a in enumerate(y):
row[aspect[i]] = lab_dict[a]
return row
df_ret = df_ret.apply(process, axis=1)
df_ret = df_ret.drop(['y'], axis=1)
df_ret.to_csv(out_path, index=False, encoding='utf-8')
|
py | 1a36cc30c3f5b9ed3a1d633018e24be7a415b942 |
#!/usr/bin/env python3
import time
import os
import sqlite3
from sqlite3 import Connection
from typing import List
V100_DB_PATH = "../SQLiteDBs/A4v100.db"
V1K_DB_PATH = "../SQLiteDBs/A4v1k.db"
V10K_DB_PATH = "../SQLiteDBs/A4v10k.db"
V100K_DB_PATH = "../SQLiteDBs/A4v100k.db"
V1M_DB_PATH = "../SQLiteDBs/A4v1M.db"
# Q5: Find the quantity of parts that are not used in any other part, your query must use EXISTS.
# select
# count(partNumber)
# from
# Parts p
# where
# not exists (
# select
# 1
# from
# Parts p2
# where
# p.partNumber = p2.needsPart
# );
QUERY_5 = '''
select
count(partNumber)
from
Parts p
where
not exists (
select
1
from
Parts p2
where
p.partNumber = p2.needsPart
);
'''
# Q6: Find the quantity of parts that are not used in any other part, your query must use NOT IN.
# select
# count(partNumber)
# from
# Parts p
# where
# p.partNumber not in (
# select
# needsPart
# from
# Parts p2
# );
QUERY_6 = '''
select
count(partNumber)
from
Parts p
where
p.partNumber not in (
select
needsPart
from
Parts p2
);
'''
# Creates an index for Q6
# CREATE INDEX idxPartNumberNeedsPart on Parts ( needsPart, partNumber );
CREATE_INDEX_QUERY = '''
CREATE INDEX idxPartNumberNeedsPart on Parts ( needsPart, partNumber );
'''
# Drops the index for Q6
# DROP INDEX idxPartNumberNeedsPart;
DROP_INDEX_QUERY = '''
DROP INDEX idxPartNumberNeedsPart;
'''
country_list = None
def main():
options = {"100": V100_DB_PATH, "1K": V1K_DB_PATH,
"10K": V10K_DB_PATH, "100K": V100K_DB_PATH, "1M": V1M_DB_PATH}
print("Executing Part 4\n")
print("Avg times and sizes for Query 5 without index\n")
run_trials(options, QUERY_5)
print("Avg times and sizes for Query 6 without index\n")
run_trials(options, QUERY_6)
print("Creating index for each database")
update_index(options, CREATE_INDEX_QUERY)
print("Avg times and sizes for Query 6 with index\n")
run_trials(options, QUERY_6)
print("Dropping index for each database\n")
update_index(options, DROP_INDEX_QUERY)
print("Done!")
def update_index(options, query):
for option in options:
path = options[option]
connection = connect(path)
cursor = connection.cursor()
cursor.execute(query)
connection.commit()
connection.close()
def run_trials(options, query):
for option in options:
print("Avg time for {} entries".format(option))
avg_time(options[option], query)
print("Size of database {}".format(os.stat(options[option]).st_size))
print("\n")
def connect(path) -> Connection:
# Returns a connection to the database provided at the path.
db_path = exact_path(path)
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
# To enable foreign keys for SQLite
cursor.execute(' PRAGMA foreign_keys=ON; ')
connection.commit()
return connection
def exact_path(path) -> str:
# Used to convert relative path to absolute path.
curr = os.path.dirname(__file__)
load_path = os.path.join(curr, path)
return load_path
def run_query(path, query) -> None:
connection = connect(path)
cursor = connection.cursor()
cursor.execute(query, {})
connection.commit()
connection.close()
def avg_time(path, query) -> None:
total_time = 0
if path in {V100K_DB_PATH, V1M_DB_PATH} and query is QUERY_5:
print("Skipping this Database")
return
for i in range(0, 100):
t_start = time.process_time()
run_query(path, query)
t_taken = time.process_time() - t_start
total_time += t_taken
# to get the average for total_time
total_time = total_time/100
# display in ms
print("Avg time: {} ms".format(total_time*1000))
if __name__ == "__main__":
main()
|
py | 1a36ccd3936af168a778540f16da27d6e208ae0a | #!/bin/python2.7
# -*- coding: utf-8 -*-
"""
Lucas Ou-Yang 2014 -- http://codelucas.com
"""
import sys
import os
import codecs
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = [
'newspaper',
]
if sys.argv[-1] == 'publish':
os.system('python3 setup.py sdist upload -r pypi')
sys.exit()
# This *must* run early. Please see this API limitation on our users:
# https://github.com/codelucas/newspaper/issues/155
if sys.version_info[0] == 2 and sys.argv[-1] not in ['publish', 'upload']:
sys.exit('WARNING! You are attempting to install newspaper3k\'s '
'python3 repository on python2. PLEASE RUN '
'`$ pip3 install newspaper3k` for python3 or '
'`$ pip install newspaper` for python2')
with open('requirements.txt') as f:
required = f.read().splitlines()
with codecs.open('README.rst', 'r', 'utf-8') as f:
readme = f.read()
setup(
name='newspaper3k',
version='0.2.5',
description='Simplified python article discovery & extraction.',
long_description=readme,
author='Lucas Ou-Yang',
author_email='[email protected]',
url='https://github.com/codelucas/newspaper/',
packages=packages,
include_package_data=True,
install_requires=required,
license='MIT',
zip_safe=False,
classifiers=[
'Programming Language :: Python :: 3',
'Natural Language :: English',
'Intended Audience :: Developers',
],
)
|
py | 1a36ccd8c472fd4be414cbc91ed42136b0d7420c | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa: F401
from jax._src.lax.lax import (
ConvDimensionNumbers,
ConvGeneralDilatedDimensionNumbers,
DotDimensionNumbers,
GatherDimensionNumbers,
Precision,
RoundingMethod,
ScatterDimensionNumbers,
abs,
abs_p,
acos,
acos_p,
acosh,
acosh_p,
add,
add_p,
after_all,
after_all_p,
and_p,
argmax,
argmax_p,
argmin,
argmin_p,
asin,
asin_p,
asinh,
asinh_p,
atan,
atan_p,
atan2,
atan2_p,
atanh,
atanh_p,
batch_matmul,
bessel_i0e,
bessel_i0e_p,
bessel_i1e,
bessel_i1e_p,
betainc,
bitcast_convert_type,
bitcast_convert_type_p,
bitwise_and,
bitwise_not,
bitwise_or,
bitwise_xor,
broadcast,
broadcast_p,
broadcast_in_dim,
broadcast_in_dim_p,
broadcast_shapes,
broadcast_to_rank,
broadcasted_iota,
ceil,
ceil_p,
clamp,
clamp_p,
collapse,
complex,
complex_p,
concatenate,
concatenate_p,
conj,
conj_p,
conv,
conv_dimension_numbers,
conv_general_dilated,
conv_general_dilated_p,
conv_general_permutations,
conv_general_shape_tuple,
conv_shape_tuple,
conv_transpose,
conv_transpose_shape_tuple,
conv_with_general_padding,
convert_element_type,
convert_element_type_p,
cos,
cos_p,
cosh,
cosh_p,
create_token,
create_token_p,
digamma,
digamma_p,
div,
div_p,
dot,
dot_general,
dot_general_p,
dtype,
dtypes,
dynamic_index_in_dim,
dynamic_slice,
dynamic_slice_in_dim,
dynamic_slice_p,
dynamic_update_index_in_dim,
dynamic_update_slice,
dynamic_update_slice_in_dim,
dynamic_update_slice_p,
eq,
eq_p,
erf,
erf_inv,
erf_inv_p,
erf_p,
erfc,
erfc_p,
exp,
exp_p,
expand_dims,
expm1,
expm1_p,
floor,
floor_p,
full,
full_like,
gather,
gather_p,
ge,
ge_p,
gt,
gt_p,
igamma,
igamma_grad_a,
igamma_grad_a_p,
igamma_p,
igammac,
igammac_p,
imag,
imag_p,
index_in_dim,
index_take,
infeed,
infeed_p,
integer_pow,
integer_pow_p,
iota,
iota_p,
is_finite,
is_finite_p,
itertools,
le,
le_p,
lgamma,
lgamma_p,
log,
log1p,
log1p_p,
log_p,
lt,
lt_p,
max,
max_p,
min,
min_p,
mul,
mul_p,
naryop,
naryop_dtype_rule,
ne,
ne_p,
neg,
neg_p,
nextafter,
nextafter_p,
not_p,
or_p,
outfeed,
outfeed_p,
pad,
pad_p,
padtype_to_pads,
partial,
population_count,
population_count_p,
pow,
pow_p,
prod,
random_gamma_grad,
random_gamma_grad_p,
real,
real_p,
reciprocal,
reduce,
reduce_and_p,
reduce_max_p,
reduce_min_p,
reduce_or_p,
reduce_p,
reduce_prod_p,
reduce_sum_p,
reduce_window,
reduce_window_max_p,
reduce_window_min_p,
reduce_window_p,
reduce_window_shape_tuple,
reduce_window_sum_p,
regularized_incomplete_beta_p,
rem,
rem_p,
reshape,
reshape_p,
rev,
rev_p,
rng_uniform,
rng_uniform_p,
round,
round_p,
rsqrt,
rsqrt_p,
scatter,
scatter_add,
scatter_add_p,
scatter_max,
scatter_max_p,
scatter_min,
scatter_min_p,
scatter_mul,
scatter_mul_p,
scatter_p,
select,
select_and_gather_add_p,
select_and_scatter_add_p,
select_and_scatter_p,
select_p,
shift_left,
shift_left_p,
shift_right_arithmetic,
shift_right_arithmetic_p,
shift_right_logical,
shift_right_logical_p,
sign,
sign_p,
sin,
sin_p,
sinh,
sinh_p,
slice,
slice_in_dim,
slice_p,
sort,
sort_key_val,
sort_p,
sqrt,
sqrt_p,
square,
squeeze,
squeeze_p,
standard_abstract_eval,
standard_naryop,
standard_primitive,
standard_translate,
standard_unop,
stop_gradient,
sub,
sub_p,
tan,
tan_p,
tanh,
tanh_p,
tie_in,
tie_in_p,
top_k,
top_k_p,
transpose,
transpose_p,
unop,
unop_dtype_rule,
xor_p,
zeros_like_array,
)
from jax._src.lax.lax import (_reduce_sum, _reduce_max, _reduce_min, _reduce_or,
_reduce_and, _reduce_window_sum, _reduce_window_max,
_reduce_window_min, _reduce_window_prod,
_select_and_gather_add,
_select_and_scatter_add, _float, _complex, _input_dtype,
_const, _eq_meet, _broadcasting_select,
_check_user_dtype_supported, _one, _zero, _const,
_upcast_fp16_for_computation, _broadcasting_shape_rule,
_eye, _tri, _delta, _ones, _zeros, _dilate_shape)
from jax._src.lax.control_flow import (
associative_scan,
cond,
cond_p,
cummax,
cummax_p,
cummin,
cummin_p,
cumprod,
cumprod_p,
cumsum,
cumsum_p,
custom_linear_solve,
custom_root,
fori_loop,
linear_solve_p,
map,
scan,
scan_bind,
scan_p,
switch,
while_loop,
while_p,
)
from jax._src.lax.fft import (
fft,
fft_p,
)
from jax._src.lax.parallel import (
all_gather,
all_to_all,
all_to_all_p,
axis_index,
axis_index_p,
pmax,
pmax_p,
pmean,
pmin,
pmin_p,
ppermute,
ppermute_p,
pshuffle,
psum,
psum_p,
pswapaxes,
pdot,
)
from jax._src.lax.other import (
conv_general_dilated_patches
)
from . import linalg
|
py | 1a36cebf58d422104f1fc311a35ad678e77eea35 | try:
# Try to use setuptools so as to enable support of the special
# "Microsoft Visual C++ Compiler for Python 2.7" (http://aka.ms/vcpython27)
# for building under Windows.
# Note setuptools >= 6.0 is required for this.
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
from distutils.command import build
from distutils.spawn import spawn
from distutils import sysconfig
import sys
import os
import platform
import versioneer
if sys.platform.startswith('linux'):
# Patch for #2555 to make wheels without libpython
sysconfig.get_config_vars()['Py_ENABLE_SHARED'] = 0
class build_doc(build.build):
description = "build documentation"
def run(self):
spawn(['make', '-C', 'docs', 'html'])
versioneer.VCS = 'git'
versioneer.versionfile_source = 'numba/_version.py'
versioneer.versionfile_build = 'numba/_version.py'
versioneer.tag_prefix = ''
versioneer.parentdir_prefix = 'numba-'
cmdclass = versioneer.get_cmdclass()
cmdclass['build_doc'] = build_doc
GCCFLAGS = ["-std=c89", "-Wdeclaration-after-statement", "-Werror"]
if os.environ.get("NUMBA_GCC_FLAGS"):
CFLAGS = GCCFLAGS
else:
CFLAGS = ['-g']
install_name_tool_fixer = []
if sys.platform == 'darwin':
install_name_tool_fixer += ['-headerpad_max_install_names']
def is_building():
"""
Parse the setup.py command and return whether a build is requested.
If False is returned, only an informational command is run.
If True is returned, information about C extensions will have to
be passed to the setup() function.
"""
if len(sys.argv) < 2:
# User forgot to give an argument probably, let setuptools handle that.
return True
info_commands = ['--help-commands', '--name', '--version', '-V',
'--fullname', '--author', '--author-email',
'--maintainer', '--maintainer-email', '--contact',
'--contact-email', '--url', '--license', '--description',
'--long-description', '--platforms', '--classifiers',
'--keywords', '--provides', '--requires', '--obsoletes']
# Add commands that do more than print info, but also don't need
# any build step.
info_commands.extend(['egg_info', 'install_egg_info', 'rotate'])
for command in info_commands:
if command in sys.argv[1:]:
return False
return True
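# Illustrative sketch (added for clarity, not part of the original setup.py):
# how is_building() classifies two hypothetical invocations, given the
# info_commands list above.
#
#     sys.argv = ['setup.py', '--version']   # info-only command -> returns False
#     sys.argv = ['setup.py', 'build_ext']   # real build step   -> returns True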
def is_building_wheel():
if len(sys.argv) < 2:
# No command is given.
return False
return 'bdist_wheel' in sys.argv[1:]
def get_ext_modules():
"""
Return a list of Extension instances for the setup() call.
"""
# Note we don't import Numpy at the toplevel, since setup.py
# should be able to run without Numpy for pip to discover the
# build dependencies
import numpy.distutils.misc_util as np_misc
# Inject required options for extensions compiled against the Numpy
# C API (include dirs, library dirs etc.)
np_compile_args = np_misc.get_info('npymath')
ext_dynfunc = Extension(name='numba._dynfunc',
sources=['numba/_dynfuncmod.c'],
extra_compile_args=CFLAGS,
depends=['numba/_pymodule.h',
'numba/_dynfunc.c'])
ext_dispatcher = Extension(name="numba._dispatcher",
sources=['numba/_dispatcher.c',
'numba/_typeof.c',
'numba/_hashtable.c',
'numba/_dispatcherimpl.cpp',
'numba/typeconv/typeconv.cpp'],
depends=["numba/_pymodule.h",
"numba/_dispatcher.h",
"numba/_typeof.h",
"numba/_hashtable.h"],
**np_compile_args)
ext_helperlib = Extension(name="numba._helperlib",
sources=["numba/_helpermod.c",
"numba/_math_c99.c"],
extra_compile_args=CFLAGS,
extra_link_args=install_name_tool_fixer,
depends=["numba/_pymodule.h",
"numba/_math_c99.h",
"numba/_helperlib.c",
"numba/_lapack.c",
"numba/_npymath_exports.c",
"numba/_random.c",
"numba/mathnames.inc"],
**np_compile_args)
ext_typeconv = Extension(name="numba.typeconv._typeconv",
sources=["numba/typeconv/typeconv.cpp",
"numba/typeconv/_typeconv.cpp"],
depends=["numba/_pymodule.h"],
)
ext_npyufunc_ufunc = Extension(name="numba.npyufunc._internal",
sources=["numba/npyufunc/_internal.c"],
depends=["numba/npyufunc/_ufunc.c",
"numba/npyufunc/_internal.h",
"numba/_pymodule.h"],
**np_compile_args)
ext_npyufunc_workqueue_impls = []
def check_file_at_path(path2file):
"""
Takes a list as a path, a single glob (*) is permitted as an entry which
indicates that expansion at this location is required (i.e. version
might not be known).
"""
found = None
path2check = [os.path.split(os.path.split(sys.executable)[0])[0]]
path2check += [os.getenv(n, '') for n in ['CONDA_PREFIX', 'PREFIX']]
if sys.platform.startswith('win'):
path2check += [os.path.join(p, 'Library') for p in path2check]
for p in path2check:
if p:
if '*' in path2file:
globloc = path2file.index('*')
searchroot = os.path.join(*path2file[:globloc])
try:
potential_locs = os.listdir(os.path.join(p, searchroot))
except BaseException:
continue
searchfor = path2file[globloc + 1:]
for x in potential_locs:
potpath = os.path.join(p, searchroot, x, *searchfor)
if os.path.isfile(potpath):
found = p # the latest is used
elif os.path.isfile(os.path.join(p, *path2file)):
found = p # the latest is used
return found
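    # Usage sketch (illustrative, not in the original sources): the '*' entry
    # marks the component to expand, e.g. an unknown clang version directory.
    # The call below mirrors the omp.h lookup performed further down and
    # returns the containing prefix, or None when nothing matches.
    #
    #     prefix = check_file_at_path(['lib', 'clang', '*', 'include', 'omp.h'])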
# Search for Intel TBB, first check env var TBBROOT then conda locations
tbb_root = os.getenv('TBBROOT')
if not tbb_root:
tbb_root = check_file_at_path(['include', 'tbb', 'tbb.h'])
# Set various flags for use in TBB and openmp. On OSX, also find OpenMP!
have_openmp = True
if sys.platform.startswith('win'):
cpp11flags = []
ompcompileflags = ['-openmp']
omplinkflags = []
elif sys.platform.startswith('darwin'):
cpp11flags = ['-std=c++11']
# This is a bit unusual but necessary...
# llvm (clang) OpenMP is used for headers etc at compile time
# Intel OpenMP (libiomp5) provides the link library.
        # They are binary compatible but may not safely coexist in a process;
        # since libiomp5 is more prevalent and is often already linked in for
        # NumPy, it is the one used here.
ompcompileflags = ['-fopenmp']
omplinkflags = ['-fopenmp=libiomp5']
omppath = ['lib', 'clang', '*', 'include', 'omp.h']
have_openmp = check_file_at_path(omppath)
else:
cpp11flags = ['-std=c++11']
ompcompileflags = ['-fopenmp']
if platform.machine() == 'ppc64le':
omplinkflags = ['-fopenmp']
else:
omplinkflags = ['-fopenmp']
if tbb_root:
print("Using Intel TBB from:", tbb_root)
ext_npyufunc_tbb_workqueue = Extension(
name='numba.npyufunc.tbbpool',
sources=['numba/npyufunc/tbbpool.cpp', 'numba/npyufunc/gufunc_scheduler.cpp'],
depends=['numba/npyufunc/workqueue.h'],
include_dirs=[os.path.join(tbb_root, 'include')],
extra_compile_args=cpp11flags,
libraries =['tbb'], # TODO: if --debug or -g, use 'tbb_debug'
library_dirs=[os.path.join(tbb_root, 'lib', 'intel64', 'gcc4.4'), # for Linux
os.path.join(tbb_root, 'lib'), # for MacOS
os.path.join(tbb_root, 'lib', 'intel64', 'vc_mt'), # for Windows
],
)
ext_npyufunc_workqueue_impls.append(ext_npyufunc_tbb_workqueue)
else:
print("TBB not found")
# Disable OpenMP if we are building a wheel or
# forced by user with NUMBA_NO_OPENMP=1
if is_building_wheel() or os.getenv('NUMBA_NO_OPENMP'):
print("OpenMP disabled")
elif have_openmp:
print("Using OpenMP from:", have_openmp)
# OpenMP backed work queue
ext_npyufunc_omppool = Extension( name='numba.npyufunc.omppool',
sources=['numba/npyufunc/omppool.cpp',
'numba/npyufunc/gufunc_scheduler.cpp'],
depends=['numba/npyufunc/workqueue.h'],
extra_compile_args=ompcompileflags + cpp11flags,
extra_link_args = omplinkflags)
ext_npyufunc_workqueue_impls.append(ext_npyufunc_omppool)
else:
print("OpenMP not found")
# Build the Numba workqueue implementation irrespective of whether the TBB
# version is built. Users can select a backend via env vars.
ext_npyufunc_workqueue = Extension(
name='numba.npyufunc.workqueue',
sources=['numba/npyufunc/workqueue.c', 'numba/npyufunc/gufunc_scheduler.cpp'],
depends=['numba/npyufunc/workqueue.h'])
ext_npyufunc_workqueue_impls.append(ext_npyufunc_workqueue)
ext_mviewbuf = Extension(name='numba.mviewbuf',
extra_link_args=install_name_tool_fixer,
sources=['numba/mviewbuf.c'])
ext_nrt_python = Extension(name='numba.runtime._nrt_python',
sources=['numba/runtime/_nrt_pythonmod.c',
'numba/runtime/nrt.c'],
depends=['numba/runtime/nrt.h',
'numba/_pymodule.h',
'numba/runtime/_nrt_python.c'],
**np_compile_args)
ext_jitclass_box = Extension(name='numba.jitclass._box',
sources=['numba/jitclass/_box.c'],
depends=['numba/_pymodule.h'],
)
ext_cuda_extras = Extension(name='numba.cuda.cudadrv._extras',
sources=['numba/cuda/cudadrv/_extras.c'],
depends=['numba/_pymodule.h'],
include_dirs=["numba"])
ext_modules = [ext_dynfunc, ext_dispatcher, ext_helperlib, ext_typeconv,
ext_npyufunc_ufunc, ext_mviewbuf, ext_nrt_python,
ext_jitclass_box, ext_cuda_extras]
ext_modules += ext_npyufunc_workqueue_impls
return ext_modules
def find_packages(root_dir, root_name):
"""
Recursively find packages in *root_dir*.
"""
packages = []
def rec(path, pkg_name):
packages.append(pkg_name)
for fn in sorted(os.listdir(path)):
subpath = os.path.join(path, fn)
if os.path.exists(os.path.join(subpath, "__init__.py")):
subname = "%s.%s" % (pkg_name, fn)
rec(subpath, subname)
rec(root_dir, root_name)
return packages
packages = find_packages("numba", "numba")
build_requires = ['numpy']
install_requires = ['llvmlite>=0.27.0dev0', 'numpy']
if sys.version_info < (3, 4):
install_requires.extend(['enum34', 'singledispatch'])
if sys.version_info < (3, 3):
install_requires.append('funcsigs')
metadata = dict(
name='numba',
description="compiling Python code using LLVM",
version=versioneer.get_version(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Compilers",
],
package_data={
# HTML templates for type annotations
"numba.annotations": ["*.html"],
# Various test data
"numba.cuda.tests.cudadrv.data": ["*.ptx"],
"numba.tests": ["pycc_distutils_usecase/*.py"],
# Some C files are needed by pycc
"numba": ["*.c", "*.h"],
"numba.pycc": ["*.c", "*.h"],
"numba.runtime": ["*.c", "*.h"],
},
scripts=["numba/pycc/pycc", "bin/numba"],
author="Anaconda, Inc.",
author_email="[email protected]",
url="http://numba.github.com",
packages=packages,
setup_requires=build_requires,
install_requires=install_requires,
license="BSD",
cmdclass=cmdclass,
)
with open('README.rst') as f:
metadata['long_description'] = f.read()
if is_building():
metadata['ext_modules'] = get_ext_modules()
setup(**metadata)
|
py | 1a36ceec4cba02d519929f73d2e1f9c7ea9ed08f | from django.urls import reverse, resolve
class TestUrls:
def test_words_url(self):
#path = reverse('main_app', kwargs={'pk':1})
assert True #resolve(path).view_name == 'main_app' |
py | 1a36cf16cae0e4f3211edaebba7dd7188cf3afaa | """Base class to manage comms"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from traitlets.config import LoggingConfigurable
from IPython.core.prompts import LazyEvaluate
from IPython.core.getipython import get_ipython
from ipython_genutils.importstring import import_item
from ipython_genutils.py3compat import string_types
from traitlets import Instance, Unicode, Dict, Any
from .comm import Comm
def lazy_keys(dikt):
"""Return lazy-evaluated string representation of a dictionary's keys
Key list is only constructed if it will actually be used.
Used for debug-logging.
"""
    return LazyEvaluate(lambda d: list(d.keys()), dikt)
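# Usage sketch (added for illustration, not in the original module): the key
# list is only materialised if the logger actually formats its arguments,
# i.e. when debug logging is enabled.
#
#     comms = {'abc': object(), 'def': object()}          # hypothetical dict
#     log.debug("Current comms: %s", lazy_keys(comms))    # keys built lazily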
class CommManager(LoggingConfigurable):
"""Manager for Comms in the Kernel"""
# If this is instantiated by a non-IPython kernel, shell will be None
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
kernel = Instance('ipykernel.kernelbase.Kernel')
iopub_socket = Any()
def _iopub_socket_default(self):
return self.kernel.iopub_socket
session = Instance('jupyter_client.session.Session')
def _session_default(self):
return self.kernel.session
comms = Dict()
targets = Dict()
# Public APIs
def register_target(self, target_name, f):
"""Register a callable f for a given target name
        f will be called with two arguments when a comm_open message is received with this `target_name`:
- the Comm instance
- the `comm_open` message itself.
f can be a Python callable or an import string for one.
"""
if isinstance(f, string_types):
f = import_item(f)
self.targets[target_name] = f
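    # Usage sketch (illustrative only, not part of the original class); the
    # handler receives the new Comm instance and the raw comm_open message:
    #
    #     def handle_open(comm, open_msg):                 # hypothetical callback
    #         comm.send({'status': 'ok'})
    #
    #     manager.register_target('my_target', handle_open)
    #     # or register via an import string, resolved with import_item():
    #     manager.register_target('my_target', 'mypkg.comms.handle_open')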
def unregister_target(self, target_name, f):
"""Unregister a callable registered with register_target"""
        return self.targets.pop(target_name)
def register_comm(self, comm):
"""Register a new comm"""
comm_id = comm.comm_id
comm.shell = self.shell
comm.kernel = self.kernel
comm.iopub_socket = self.iopub_socket
self.comms[comm_id] = comm
return comm_id
def unregister_comm(self, comm):
"""Unregister a comm, and close its counterpart"""
# unlike get_comm, this should raise a KeyError
comm = self.comms.pop(comm.comm_id)
def get_comm(self, comm_id):
"""Get a comm with a particular id
Returns the comm if found, otherwise None.
This will not raise an error,
it will log messages if the comm cannot be found.
"""
if comm_id not in self.comms:
self.log.warn("No such comm: %s", comm_id)
self.log.debug("Current comms: %s", lazy_keys(self.comms))
return
# call, because we store weakrefs
comm = self.comms[comm_id]
return comm
# Message handlers
def comm_open(self, stream, ident, msg):
"""Handler for comm_open messages"""
content = msg['content']
comm_id = content['comm_id']
target_name = content['target_name']
f = self.targets.get(target_name, None)
comm = Comm(comm_id=comm_id,
shell=self.shell,
kernel=self.kernel,
iopub_socket=self.iopub_socket,
primary=False,
target_name=target_name,
)
self.register_comm(comm)
if f is None:
self.log.error("No such comm target registered: %s", target_name)
else:
try:
f(comm, msg)
return
except Exception:
self.log.error("Exception opening comm with target: %s", target_name, exc_info=True)
# Failure.
try:
comm.close()
except:
self.log.error("""Could not close comm during `comm_open` failure
clean-up. The comm may not have been opened yet.""", exc_info=True)
def comm_msg(self, stream, ident, msg):
"""Handler for comm_msg messages"""
content = msg['content']
comm_id = content['comm_id']
comm = self.get_comm(comm_id)
if comm is None:
# no such comm
return
try:
comm.handle_msg(msg)
except Exception:
self.log.error("Exception in comm_msg for %s", comm_id, exc_info=True)
def comm_close(self, stream, ident, msg):
"""Handler for comm_close messages"""
content = msg['content']
comm_id = content['comm_id']
comm = self.get_comm(comm_id)
if comm is None:
# no such comm
self.log.debug("No such comm to close: %s", comm_id)
return
del self.comms[comm_id]
try:
comm.handle_close(msg)
except Exception:
self.log.error("Exception handling comm_close for %s", comm_id, exc_info=True)
__all__ = ['CommManager']
|
py | 1a36d0b008fea4a14c664fd27dbd1c556bb42493 | # Copyright (C) 2012-2016 Ben Kurtovic <[email protected]>
# Copyright (C) 2019-2020 Yuri Astrakhan <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from sys import maxsize
__all__ = []
def inheritdoc(method):
"""Set __doc__ of *method* to __doc__ of *method* in its parent class.
Since this is used on :class:`.SmartList`, the "parent class" used is
``list``. This function can be used as a decorator.
"""
method.__doc__ = getattr(list, method.__name__).__doc__
return method
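# Illustrative sketch (not in the original module): applying @inheritdoc to a
# list subclass copies the docstring of the matching ``list`` method.
#
#     class _Example(list):                  # hypothetical subclass
#         @inheritdoc
#         def append(self, item):
#             super().append(item)
#
#     assert _Example.append.__doc__ == list.append.__doc__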
class _SliceNormalizerMixIn:
"""MixIn that provides a private method to normalize slices."""
def _normalize_slice(self, key, clamp=False):
"""Return a slice equivalent to the input *key*, standardized."""
if key.start is None:
start = 0
else:
start = (len(self) + key.start) if key.start < 0 else key.start
if key.stop is None or key.stop == maxsize:
stop = len(self) if clamp else None
else:
stop = (len(self) + key.stop) if key.stop < 0 else key.stop
return slice(start, stop, key.step or 1)
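# Illustrative examples (added, not in the original file) of the normalization
# above, assuming a sequence of length 10:
#
#     _normalize_slice(slice(None, None))              -> slice(0, None, 1)
#     _normalize_slice(slice(-3, None))                -> slice(7, None, 1)
#     _normalize_slice(slice(2, -1))                   -> slice(2, 9, 1)
#     _normalize_slice(slice(None, None), clamp=True)  -> slice(0, 10, 1)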
|
py | 1a36d14d56e500697a8b5b4092d4db8b17397acf | import unittest
import logging
import sys
sys.path.append('../')
from backend.charts import Charts
from cleep.exception import InvalidParameter, MissingParameter, CommandError, Unauthorized
from cleep.libs.tests import session
import os
import sqlite3
import time
from mock import Mock
class FakeEvent():
def __init__(self, values):
self.values = values
def get_chart_values(self, params):
return self.values
class TestCharts(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.FATAL, format=u'%(asctime)s %(name)s:%(lineno)d %(levelname)s : %(message)s')
self.session = session.TestSession(self)
def tearDown(self):
self.session.clean()
if os.path.exists(self.db_path):
os.remove(self.db_path)
def init(self, start=True):
_charts = Charts
_charts.DATABASE_PATH = '/tmp/'
self.db_path = os.path.join(_charts.DATABASE_PATH, _charts.DATABASE_NAME)
_charts.CHECK_SAME_THREAD = False
self.module = self.session.setup(_charts)
self.session.start_module(self.module)
self.cnx = sqlite3.connect(os.path.join(_charts.DATABASE_PATH, _charts.DATABASE_NAME))
self.cur = self.cnx.cursor()
def __get_table_count(self, table_name, uuid=None):
query = 'SELECT count(*) FROM %s' % table_name
if uuid:
query = '%s WHERE uuid="%s"' % (query, uuid)
self.cur.execute(query)
res = self.cur.fetchall()
return res[0][0]
def __get_table_rows(self, table_name, uuid=None):
query = 'SELECT * FROM %s' % table_name
if uuid:
query = '%s WHERE uuid="%s"' % (query, uuid)
self.cur.execute(query)
res = self.cur.fetchall()
return res
def __fill_data_table(self, table_name, values, event_name='test.test.test', fields_name=['field1', 'field2', 'field3', 'field4', 'field5']):
"""
Insert values into specified table
Args:
table_name (string): table name
values (list): list of tuple of values::
[
(val1, val2),
(val3, val4),
...
]
event_name (string): event name. Default test.test.test
fields_name (list): fields name
"""
columns = ['timestamp', 'uuid']
for i in range(len(values[0])-2):
columns.append('value' + str(i+1))
query = 'INSERT INTO %s(%s) VALUES %s' % (table_name, ','.join(columns), ','.join([str(val) for val in values]))
# logging.debug('Fill data table: %s' % query)
self.cur.execute(query)
columns = ['uuid', 'event', 'valuescount']
for i in range(len(values[0])-2):
columns.append('value' + str(i+1))
# fields: uuid, event, valuescount, value1, value2...
device_values = [values[0][1], event_name, len(values[0])-2]
for i in range(len(values[0])-2):
device_values.append(fields_name[i])
# logging.debug('device values: %s' % device_values)
query = 'INSERT INTO devices(%s) VALUES (%s)' % (','.join(columns), ('?,'*len(device_values))[:-1])
# logging.debug('Fill device table: %s' % query)
self.cur.execute(query, device_values)
self.cnx.commit()
def test_check_database(self):
self.init()
self.cur.execute('SELECT name FROM sqlite_master WHERE type="table";')
tables = self.cur.fetchall()
tables = [table[0] for table in tables]
self.assertTrue('data1' in tables, 'data1 table should be created')
self.assertTrue('data2' in tables, 'data2 table should be created')
self.assertTrue('data3' in tables, 'data3 table should be created')
self.assertTrue('data4' in tables, 'data4 table should be created')
self.assertTrue('devices' in tables, 'devices table should be created')
def test_save_data_1(self):
self.init()
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test', 'value':1}]
self.module._save_data(uuid, event, values)
count = self.__get_table_count('devices')
self.assertEqual(count, 1, 'Devices table should have only one record')
row = self.__get_table_rows('devices')
#(u'132-456-789', u'test.test.test', 1, u'test', None, None, None)
self.assertEqual(row[0][0], uuid, 'Device uuid is not properly saved')
self.assertEqual(row[0][1], event, 'Event is not properly saved')
self.assertEqual(row[0][2], len(values), 'Values count is not properly saved')
self.assertEqual(row[0][3], values[0]['field'], 'Field name is not properly saved')
self.assertEqual(row[0][4], None, 'Field name is not properly saved')
self.assertEqual(row[0][5], None, 'Field name is not properly saved')
self.assertEqual(row[0][6], None, 'Field name is not properly saved')
count = self.__get_table_count('data1')
self.assertEqual(count, 1, 'Data1 table should have only one record')
count = self.__get_table_count('data2')
self.assertEqual(count, 0, 'Data2 table should have no record')
count = self.__get_table_count('data3')
self.assertEqual(count, 0, 'Data3 table should have no record')
count = self.__get_table_count('data4')
self.assertEqual(count, 0, 'Data4 table should have no record')
def test_save_data_2(self):
self.init()
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}]
self.module._save_data(uuid, event, values)
count = self.__get_table_count('devices')
self.assertEqual(count, 1, 'Devices table should have only one record')
row = self.__get_table_rows('devices')
self.assertEqual(row[0][0], uuid, 'Device uuid is not properly saved')
self.assertEqual(row[0][1], event, 'Event is not properly saved')
self.assertEqual(row[0][2], len(values), 'Values count is not properly saved')
self.assertEqual(row[0][3], values[0]['field'], 'Field name is not properly saved')
self.assertEqual(row[0][4], values[1]['field'], 'Field name is not properly saved')
self.assertEqual(row[0][5], None, 'Field name is not properly saved')
self.assertEqual(row[0][6], None, 'Field name is not properly saved')
count = self.__get_table_count('data1')
self.assertEqual(count, 0, 'Data1 table should have no record')
count = self.__get_table_count('data2')
self.assertEqual(count, 1, 'Data2 table should have only one record')
count = self.__get_table_count('data3')
self.assertEqual(count, 0, 'Data3 table should have no record')
count = self.__get_table_count('data4')
self.assertEqual(count, 0, 'Data4 table should have no record')
def test_save_data_3(self):
self.init()
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}, {'field':'test3', 'value':3}]
self.module._save_data(uuid, event, values)
count = self.__get_table_count('devices')
self.assertEqual(count, 1, 'Devices table should have only one record')
row = self.__get_table_rows('devices')
self.assertEqual(row[0][0], uuid, 'Device uuid is not properly saved')
self.assertEqual(row[0][1], event, 'Event is not properly saved')
self.assertEqual(row[0][2], len(values), 'Values count is not properly saved')
self.assertEqual(row[0][3], values[0]['field'], 'Field name is not properly saved')
self.assertEqual(row[0][4], values[1]['field'], 'Field name is not properly saved')
self.assertEqual(row[0][5], values[2]['field'], 'Field name is not properly saved')
self.assertEqual(row[0][6], None, 'Field name is not properly saved')
count = self.__get_table_count('data1')
self.assertEqual(count, 0, 'Data1 table should have no record')
count = self.__get_table_count('data2')
self.assertEqual(count, 0, 'Data2 table should have no record')
count = self.__get_table_count('data3')
self.assertEqual(count, 1, 'Data3 table should have only one record')
count = self.__get_table_count('data4')
self.assertEqual(count, 0, 'Data4 table should have no record')
def test_save_data_4(self):
self.init()
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}, {'field':'test3', 'value':3}, {'field':'test4', 'value':4}]
self.module._save_data(uuid, event, values)
count = self.__get_table_count('devices')
self.assertEqual(count, 1, 'Devices table should have only one record')
row = self.__get_table_rows('devices')
self.assertEqual(row[0][0], uuid, 'Device uuid is not properly saved')
self.assertEqual(row[0][1], event, 'Event is not properly saved')
self.assertEqual(row[0][2], len(values), 'Values count is not properly saved')
self.assertEqual(row[0][3], values[0]['field'], 'Field name is not properly saved')
self.assertEqual(row[0][4], values[1]['field'], 'Field name is not properly saved')
self.assertEqual(row[0][5], values[2]['field'], 'Field name is not properly saved')
self.assertEqual(row[0][6], values[3]['field'], 'Field name is not properly saved')
count = self.__get_table_count('data1')
self.assertEqual(count, 0, 'Data1 table should have no record')
count = self.__get_table_count('data2')
self.assertEqual(count, 0, 'Data2 table should have no record')
count = self.__get_table_count('data3')
self.assertEqual(count, 0, 'Data3 table should have no record')
count = self.__get_table_count('data4')
self.assertEqual(count, 1, 'Data4 table should have only one record')
def test_save_data_bool(self):
self.init()
event = 'test.test.test'
uuid = '123-456-789-0'
values = [{'field':'test', 'value':True}]
self.module._save_data(uuid, event, values)
row = self.__get_table_rows('data1')
self.assertEqual(row[0][3], 1, 'Bool value is not properly saved')
uuid = '123-456-789-1'
values = [{'field':'test1', 'value':True}, {'field':'test2', 'value':True}]
self.module._save_data(uuid, event, values)
row = self.__get_table_rows('data2')
self.assertEqual(row[0][3], 1, 'Bool value is not properly saved')
self.assertEqual(row[0][4], 1, 'Bool value is not properly saved')
uuid = '123-456-789-2'
values = [{'field':'test1', 'value':True}, {'field':'test2', 'value':True}, {'field':'test3', 'value':True}]
self.module._save_data(uuid, event, values)
row = self.__get_table_rows('data3')
self.assertEqual(row[0][3], 1, 'Bool value is not properly saved')
self.assertEqual(row[0][4], 1, 'Bool value is not properly saved')
self.assertEqual(row[0][5], 1, 'Bool value is not properly saved')
uuid = '123-456-789-3'
values = [{'field':'test1', 'value':True}, {'field':'test2', 'value':True}, {'field':'test3', 'value':True}, {'field':'test4', 'value':True}]
self.module._save_data(uuid, event, values)
row = self.__get_table_rows('data4')
self.assertEqual(row[0][3], 1, 'Bool value is not properly saved')
self.assertEqual(row[0][4], 1, 'Bool value is not properly saved')
self.assertEqual(row[0][5], 1, 'Bool value is not properly saved')
self.assertEqual(row[0][6], 1, 'Bool value is not properly saved')
uuid = '123-456-789-4'
values = [{'field':'test1', 'value':False}, {'field':'test2', 'value':False}, {'field':'test3', 'value':False}, {'field':'test4', 'value':False}]
self.module._save_data(uuid, event, values)
row = self.__get_table_rows('data4', uuid)
self.assertEqual(row[0][3], 0, 'Bool value is not properly saved')
self.assertEqual(row[0][4], 0, 'Bool value is not properly saved')
self.assertEqual(row[0][5], 0, 'Bool value is not properly saved')
self.assertEqual(row[0][6], 0, 'Bool value is not properly saved')
def test_save_data_existing_device(self):
self.init()
event = 'test.test.test'
uuid = '123-456-789'
values = [{'field':'test', 'value':1}]
self.module._save_data(uuid, event, values)
self.module._save_data(uuid, event, values)
row = self.__get_table_rows('data1')
self.assertEqual(len(row), 2, 'It should have 2 rows')
def test_save_data_existing_device_different_event(self):
self.init()
event1 = 'test.test.test1'
event2 = 'test.test.test2'
uuid = '123-456-789'
values = [{'field':'test', 'value':1}]
self.module._save_data(uuid, event1, values)
with self.assertRaises(CommandError) as cm:
self.module._save_data(uuid, event2, values)
self.assertEqual(cm.exception.message, 'Device %s cannot store values from event %s (stored for event %s)' % (uuid, event2, event1), 'Invalid message')
def test_save_data_exisiting_device_with_incompatible_values(self):
self.init()
event = 'test.test.test'
uuid = '123-456-789'
values1 = [{'field':'test', 'value':1}]
values2 = [{'field':'test', 'value':1}, {'field':'test', 'value':2}]
self.module._save_data(uuid, event, values1)
with self.assertRaises(CommandError) as cm:
self.module._save_data(uuid, event, values2)
self.assertEqual(cm.exception.message, 'Event %s is supposed to store %d values not %d' % (event, len(values1), len(values2)), 'Invalid message')
def test_save_data_invalid_parameters(self):
self.init()
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}, {'field':'test3', 'value':3}, {'field':'test4', 'value':4}]
with self.assertRaises(MissingParameter) as cm:
self.module._save_data(None, event, values)
self.assertEqual(cm.exception.message, 'Parameter "device_uuid" is missing', 'Uuid should not be None')
with self.assertRaises(MissingParameter) as cm:
self.module._save_data('', event, values)
self.assertEqual(cm.exception.message, 'Parameter "device_uuid" is missing', 'Uuid should not be None')
with self.assertRaises(MissingParameter) as cm:
self.module._save_data(uuid, None, values)
        self.assertEqual(cm.exception.message, 'Parameter "event" is missing', 'Event should not be None')
with self.assertRaises(MissingParameter) as cm:
self.module._save_data(uuid, '', values)
        self.assertEqual(cm.exception.message, 'Parameter "event" is missing', 'Event should not be empty')
with self.assertRaises(MissingParameter) as cm:
self.module._save_data(uuid, event, None)
        self.assertEqual(cm.exception.message, 'Parameter "values" is missing', 'Values should not be None')
with self.assertRaises(InvalidParameter) as cm:
self.module._save_data(uuid, event, 1)
        self.assertEqual(cm.exception.message, 'Parameter "values" must be a list', 'Values should be a list, not an int')
with self.assertRaises(InvalidParameter) as cm:
self.module._save_data(uuid, event, {'field':'field'})
        self.assertEqual(cm.exception.message, 'Parameter "values" must be a list', 'Values should be a list, not a dict')
with self.assertRaises(InvalidParameter) as cm:
self.module._save_data(uuid, event, [])
self.assertEqual(cm.exception.message, 'No value to save', 'Should failed if values if empty')
with self.assertRaises(InvalidParameter) as cm:
self.module._save_data(uuid, event, [{}, {}, {}, {}, {}])
self.assertEqual(cm.exception.message, 'Too many values to save for event "%s". It is limited to 4 values for now: %s' % (event, [{},{},{},{},{}]), 'Should failed if too many values are passed')
def test_get_data_1(self):
self.init()
start = int(time.time())
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}]
self.module._save_data(uuid, event, values)
end = int(time.time())
data = self.module.get_data(uuid, start, end)
self.assertTrue('data' in data, 'Data field is missing in get_data response')
self.assertTrue('uuid' in data, 'Uuid field is missing in get_data response')
self.assertTrue('event' in data, 'Event field is missing in get_data response')
self.assertTrue('names' in data, 'Names field is missing in get_data response')
self.assertEqual(len(data['data']), 1, 'There should have 1 data')
self.assertEqual(len(data['names']), len(values)+1, 'There should have %d names' % (len(values)+1))
        self.assertTrue('timestamp' in data['names'], 'timestamp column should be returned in names')
self.assertTrue('test1' in data['names'], 'test1 column should be returned in names')
for name in [value['field'] for value in values]:
self.assertTrue(name in data['names'], 'Name "%s" should exists in names' % name)
for value in [(value['field'], value['value']) for value in values]:
self.assertTrue(value[0] in data['data'][0], 'Name "%s" should exists in data' % value[0])
self.assertEqual(value[1], data['data'][0][value[0]], 'Invalid value saved for field "%s"' % value[0])
def test_get_data_2(self):
self.init()
start = int(time.time())
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}]
self.module._save_data(uuid, event, values)
end = int(time.time())
data = self.module.get_data(uuid, start, end)
self.assertTrue('data' in data, 'Data field is missing in get_data response')
self.assertTrue('uuid' in data, 'Uuid field is missing in get_data response')
self.assertTrue('event' in data, 'Event field is missing in get_data response')
self.assertTrue('names' in data, 'Names field is missing in get_data response')
self.assertEqual(len(data['data']), 1, 'There should have 1 data')
self.assertEqual(len(data['names']), len(values)+1, 'There should have %d names' % (len(values)+1))
for name in [value['field'] for value in values]:
self.assertTrue(name in data['names'], 'Name "%s" should exists in names' % name)
for value in [(value['field'], value['value']) for value in values]:
self.assertTrue(value[0] in data['data'][0], 'Name "%s" should exists in data' % value[0])
self.assertEqual(value[1], data['data'][0][value[0]], 'Invalid value saved for field "%s"' % value[0])
def test_get_data_3(self):
self.init()
start = int(time.time())
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}, {'field':'test3', 'value':3}]
self.module._save_data(uuid, event, values)
end = int(time.time())
data = self.module.get_data(uuid, start, end)
self.assertTrue('data' in data, 'Data field is missing in get_data response')
self.assertTrue('uuid' in data, 'Uuid field is missing in get_data response')
self.assertTrue('event' in data, 'Event field is missing in get_data response')
self.assertTrue('names' in data, 'Names field is missing in get_data response')
self.assertEqual(len(data['data']), 1, 'There should have 1 data')
self.assertEqual(len(data['names']), len(values)+1, 'There should have %d names' % (len(values)+1))
for name in [value['field'] for value in values]:
self.assertTrue(name in data['names'], 'Name "%s" should exists in names' % name)
for value in [(value['field'], value['value']) for value in values]:
self.assertTrue(value[0] in data['data'][0], 'Name "%s" should exists in data' % value[0])
self.assertEqual(value[1], data['data'][0][value[0]], 'Invalid value saved for field "%s"' % value[0])
def test_get_data_4(self):
self.init()
start = int(time.time())
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}, {'field':'test3', 'value':3}, {'field':'test4', 'value':4}]
self.module._save_data(uuid, event, values)
end = int(time.time())
data = self.module.get_data(uuid, start, end)
self.assertTrue('data' in data, 'Data field is missing in get_data response')
self.assertTrue('uuid' in data, 'Uuid field is missing in get_data response')
self.assertTrue('event' in data, 'Event field is missing in get_data response')
self.assertTrue('names' in data, 'Names field is missing in get_data response')
self.assertEqual(len(data['data']), 1, 'There should have 1 data')
self.assertEqual(len(data['names']), len(values)+1, 'There should have %d names' % (len(values)+1))
for name in [value['field'] for value in values]:
self.assertTrue(name in data['names'], 'Name "%s" should exists in names' % name)
for value in [(value['field'], value['value']) for value in values]:
self.assertTrue(value[0] in data['data'][0], 'Name "%s" should exists in data' % value[0])
self.assertEqual(value[1], data['data'][0][value[0]], 'Invalid value saved for field "%s"' % value[0])
def test_get_data_with_options(self):
self.init()
start = int(time.time())
uuid = '123-456-789'
event = 'test.test.test'
values1 = [{'field':'test1', 'value':1}, {'field':'test2', 'value':1}]
values2 = [{'field':'test1', 'value':2}, {'field':'test2', 'value':2}]
values3 = [{'field':'test1', 'value':3}, {'field':'test2', 'value':3}]
values4 = [{'field':'test1', 'value':4}, {'field':'test2', 'value':4}]
self.module._save_data(uuid, event, values1)
time.sleep(1.0)
self.module._save_data(uuid, event, values2)
time.sleep(1.0)
self.module._save_data(uuid, event, values3)
time.sleep(1.0)
self.module._save_data(uuid, event, values4)
end = int(time.time())
# sort asc
data = self.module.get_data(uuid, start, end, {'sort':'asc'})
ts = [row['ts'] for row in data['data']]
ts_sorted = ts[:]
ts_sorted.sort()
self.assertTrue(ts==ts_sorted, 'Sort by asc is invalid')
# sort desc
data = self.module.get_data(uuid, start, end, {'sort':'desc'})
ts = [row['ts'] for row in data['data']]
ts_sorted = ts[:]
ts_sorted.sort(reverse=True)
self.assertTrue(ts==ts_sorted, 'Sort by desc is invalid')
# limit
data = self.module.get_data(uuid, start, end, {'limit':2})
self.assertEqual(len(data['data']), 2, 'Limit option is invalid')
# fields
data = self.module.get_data(uuid, start, end, {'fields':['test2']})
self.assertEqual(len(data['names']), 2, 'Only two colums should be returned')
self.assertTrue('test2' in data['names'], 'test2 column should only be returned')
self.assertTrue('test2' in data['data'][0], 'test2 column should be returned')
self.assertTrue('test1' not in data['data'][0], 'test1 column should not be returned')
# output as list
data = self.module.get_data(uuid, start, end, {'output':'list'})
logging.debug('data=%s' % data['data'])
        self.assertTrue('test1' in data['data'], 'test1 column should be returned in data dict')
        self.assertTrue('test2' in data['data'], 'test2 column should be returned in data dict')
self.assertTrue('values' in data['data']['test1'], 'Data should contain "values" key')
self.assertTrue('name' in data['data']['test1'], 'Data should contain "name" key')
self.assertTrue(isinstance(data['data']['test1']['values'], list), 'Data should be returned as list')
def test_get_data_disable_average(self):
self.init()
values = []
start = int(time.time())
uuid = '123-456-789'
count = 100
for i in range(count):
values.append((start + i, uuid, i, i, i))
# logging.debug('Values: %s' % values)
self.__fill_data_table('data3', values)
# logging.debug('Data3: %s' % self.__get_table_rows('data3'))
# logging.debug('Device: %s' % self.__get_table_rows('devices'))
values_size = sys.getsizeof(values)
logging.debug('Values size: %s' % values_size)
# set low value for max data size to make sure to trigger average if available
self.module.MAX_DATA_SIZE = int(round(values_size / 20.0))
        # make sure averaging is enabled
data = self.module.get_data(uuid, start, start+count*2)
logging.debug('Data size: %s' % len(data['data']))
self.assertEqual(len(data['data']), int(len(values)/20.0))
# test average disabled
data = self.module.get_data(uuid, start, start+count*2, {'average': False})
logging.debug('Data size: %s' % len(data['data']))
self.assertEqual(len(data['data']), len(values))
def test_get_data_check_dict_output_averaged(self):
self.init()
values = []
start = int(time.time())
uuid = '123-456-789'
count = 100
for i in range(count):
values.append((start + i, uuid, i, i, i))
# logging.debug('Values: %s' % values)
self.__fill_data_table('data3', values)
# logging.debug('Data3: %s' % self.__get_table_rows('data3'))
# logging.debug('Device: %s' % self.__get_table_rows('devices'))
values_size = sys.getsizeof(values)
logging.debug('Values size: %s' % values_size)
# no average
data = self.module.get_data(uuid, start, start+count*2)
logging.debug('Data size: %s' % len(data['data']))
self.assertEqual(len(data['data']), len(values))
        # reduce MAX_DATA_SIZE to half of the values size so averaging is triggered and half of the data is returned
self.module.MAX_DATA_SIZE = int(round(values_size / 2.0))
data = self.module.get_data(uuid, start, start+count*2)
logging.debug('Data size: %s' % len(data['data']))
self.assertEqual(len(data['data']), int(len(values)/2.0))
        # reduce MAX_DATA_SIZE to a quarter of the values size so averaging is triggered and a quarter of the data is returned
self.module.MAX_DATA_SIZE = int(round(values_size / 4.0))
data = self.module.get_data(uuid, start, start+count*2)
logging.debug('Data size: %s' % len(data['data']))
self.assertEqual(len(data['data']), int(len(values)/4.0))
        # reduce MAX_DATA_SIZE to a twentieth of the values size so averaging is triggered and a twentieth of the data is returned
self.module.MAX_DATA_SIZE = int(round(values_size / 20.0))
data = self.module.get_data(uuid, start, start+count*2)
logging.debug('Data size: %s' % len(data['data']))
self.assertEqual(len(data['data']), int(len(values)/20.0))
        # check that the final result is correctly averaged (5 averaged points)
logging.debug('Data: %s' % data['data'])
for i in range(5):
value = 9.5 + 20 * i
self.assertEqual(data['data'][i]['field1'], value)
self.assertEqual(data['data'][i]['field2'], value)
self.assertEqual(data['data'][i]['field3'], value)
def test_get_data_check_list_output_averaged(self):
self.init()
values = []
start = int(time.time())
uuid = '123-456-789'
count = 100
for i in range(count):
values.append((start + i, uuid, i, i, i))
# logging.debug('Values: %s' % values)
self.__fill_data_table('data3', values)
# logging.debug('Data3: %s' % self.__get_table_rows('data3'))
# logging.debug('Device: %s' % self.__get_table_rows('devices'))
values_size = sys.getsizeof(values)
logging.debug('Values size: %s' % values_size)
# no average
data = self.module.get_data(uuid, start, start+count*2, {'output':'list'})
# logging.debug('Data: %s' % data)
logging.debug('Data size: %s' % len(data['data']['field1']['values']))
self.assertEqual(len(data['data']['field1']['values']), len(values))
self.assertEqual(len(data['data']['field2']['values']), len(values))
self.assertEqual(len(data['data']['field3']['values']), len(values))
        # reduce MAX_DATA_SIZE to half of the values size so averaging is triggered and half of the data is returned
self.module.MAX_DATA_SIZE = int(round(values_size / 2.0))
data = self.module.get_data(uuid, start, start+count*2, {'output':'list'})
logging.debug('Data size: %s' % len(data['data']['field1']['values']))
self.assertEqual(len(data['data']['field1']['values']), int(len(values)/2.0))
self.assertEqual(len(data['data']['field2']['values']), int(len(values)/2.0))
self.assertEqual(len(data['data']['field3']['values']), int(len(values)/2.0))
        # reduce MAX_DATA_SIZE to a quarter of the values size so averaging is triggered and a quarter of the data is returned
self.module.MAX_DATA_SIZE = int(round(values_size / 4.0))
data = self.module.get_data(uuid, start, start+count*2, {'output':'list'})
logging.debug('Data size: %s' % len(data['data']['field1']['values']))
self.assertEqual(len(data['data']['field1']['values']), int(len(values)/4.0))
self.assertEqual(len(data['data']['field2']['values']), int(len(values)/4.0))
self.assertEqual(len(data['data']['field3']['values']), int(len(values)/4.0))
        # reduce MAX_DATA_SIZE to a twentieth of the values size so averaging is triggered and a twentieth of the data is returned
self.module.MAX_DATA_SIZE = int(round(values_size / 20.0))
data = self.module.get_data(uuid, start, start+count*2, {'output':'list'})
logging.debug('Data size: %s' % len(data['data']['field1']['values']))
self.assertEqual(len(data['data']['field1']['values']), int(len(values)/20.0))
self.assertEqual(len(data['data']['field2']['values']), int(len(values)/20.0))
self.assertEqual(len(data['data']['field3']['values']), int(len(values)/20.0))
        # check that the final result is correctly averaged (5 averaged points)
logging.debug('Data: %s' % data['data']['field1']['values'])
for i in range(5):
value = 9.5 + 20 * i
self.assertEqual(data['data']['field1']['values'][i][1], value)
self.assertEqual(data['data']['field2']['values'][i][1], value)
self.assertEqual(data['data']['field3']['values'][i][1], value)
def test_get_data_invalid_parameters(self):
self.init()
start = int(time.time())
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}, {'field':'test3', 'value':3}, {'field':'test4', 'value':4}]
self.module._save_data(uuid, event, values)
end = int(time.time())
with self.assertRaises(MissingParameter) as cm:
self.module.get_data(None, start, end)
self.assertEqual(cm.exception.message, 'Parameter "device_uuid" is missing', 'Uuid should not be None')
with self.assertRaises(MissingParameter) as cm:
self.module.get_data('', start, end)
self.assertEqual(cm.exception.message, 'Parameter "device_uuid" is missing', 'Uuid should not be None')
with self.assertRaises(MissingParameter) as cm:
self.module.get_data(uuid, None, end)
self.assertEqual(cm.exception.message, 'Parameter "timestamp_start" is missing', 'Timestamp_start should not be None')
with self.assertRaises(MissingParameter) as cm:
self.module.get_data(uuid, start, None)
self.assertEqual(cm.exception.message, 'Parameter "timestamp_end" is missing', 'Timestamp_end should not be None')
with self.assertRaises(InvalidParameter) as cm:
self.module.get_data(uuid, -1, end)
self.assertEqual(cm.exception.message, 'Timestamp_start value must be positive', 'Timestamp_start should be >0')
with self.assertRaises(InvalidParameter) as cm:
self.module.get_data(uuid, start, -1)
self.assertEqual(cm.exception.message, 'Timestamp_end value must be positive', 'Timestamp_end should be >0')
def test_purge_data_1(self):
self.init()
start = int(time.time())
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}]
self.module._save_data(uuid, event, values)
end = int(time.time())
count = self.__get_table_count('data1')
self.assertEqual(count, 1, 'Data1 should not be empty')
self.module.purge_data(uuid, end+1)
count = self.__get_table_count('data1')
self.assertEqual(count, 0, 'Data1 should be empty')
def test_purge_data_2(self):
self.init()
start = int(time.time())
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}]
self.module._save_data(uuid, event, values)
end = int(time.time())
count = self.__get_table_count('data2')
self.assertEqual(count, 1, 'Data2 should not be empty')
self.module.purge_data(uuid, end+1)
count = self.__get_table_count('data2')
self.assertEqual(count, 0, 'Data2 should be empty')
def test_purge_data_3(self):
self.init()
start = int(time.time())
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}, {'field':'test3', 'value':3}]
self.module._save_data(uuid, event, values)
end = int(time.time())
count = self.__get_table_count('data3')
self.assertEqual(count, 1, 'Data3 should not be empty')
self.module.purge_data(uuid, end+1)
count = self.__get_table_count('data3')
self.assertEqual(count, 0, 'Data3 should be empty')
def test_purge_data_4(self):
self.init()
start = int(time.time())
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}, {'field':'test3', 'value':3}, {'field':'test4', 'value':4}]
self.module._save_data(uuid, event, values)
end = int(time.time())
count = self.__get_table_count('data4')
self.assertEqual(count, 1, 'Data4 should not be empty')
self.module.purge_data(uuid, end+1)
count = self.__get_table_count('data4')
self.assertEqual(count, 0, 'Data4 should be empty')
def test_purge_data_missing_parameters(self):
self.init()
start = int(time.time())
uuid = '123-456-789'
event = 'test.test.test'
values = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}, {'field':'test3', 'value':3}, {'field':'test4', 'value':4}]
self.module._save_data(uuid, event, values)
end = int(time.time())
with self.assertRaises(MissingParameter) as cm:
self.module.purge_data(None, end)
self.assertEqual(cm.exception.message, 'Parameter "device_uuid" is missing', 'Uuid should not be None')
with self.assertRaises(MissingParameter) as cm:
self.module.purge_data('', end)
self.assertEqual(cm.exception.message, 'Parameter "device_uuid" is missing', 'Uuid should not be empty')
with self.assertRaises(MissingParameter) as cm:
self.module.purge_data(uuid, None)
self.assertEqual(cm.exception.message, 'Parameter "timestamp_until" is missing', 'Timestamp_until should not be None')
with self.assertRaises(InvalidParameter) as cm:
self.module.purge_data(uuid, -1)
self.assertEqual(cm.exception.message, 'Timestamp_until value must be positive', 'Timestamp_until should be >0')
def test_delete_device_data(self):
self.init()
uuid = '123-456-789'
event = 'test.test.test'
values1 = [{'field':'test1', 'value':1}]
values2 = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}]
values3 = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}, {'field':'test3', 'value':3}]
values4 = [{'field':'test1', 'value':1}, {'field':'test2', 'value':2}, {'field':'test3', 'value':3}, {'field':'test4', 'value':4}]
self.module._save_data(uuid, event, values1)
count = self.__get_table_count('data1', uuid)
self.assertEqual(count, 1, 'Device data should be inserted')
count = self.__get_table_count('devices', uuid)
self.assertEqual(count, 1, 'Device should be inserted')
self.module._delete_device(uuid)
count = self.__get_table_count('data1', uuid)
self.assertEqual(count, 0, 'Device data should be deleted')
count = self.__get_table_count('devices', uuid)
self.assertEqual(count, 0, 'Device should be deleted')
self.module._save_data(uuid, event, values2)
self.module._delete_device(uuid)
count = self.__get_table_count('data2', uuid)
self.assertEqual(count, 0, 'Device data should be deleted')
count = self.__get_table_count('devices', uuid)
self.assertEqual(count, 0, 'Device should be deleted')
self.module._save_data(uuid, event, values3)
self.module._delete_device(uuid)
count = self.__get_table_count('data3', uuid)
self.assertEqual(count, 0, 'Device data should be deleted')
count = self.__get_table_count('devices', uuid)
self.assertEqual(count, 0, 'Device should be deleted')
self.module._save_data(uuid, event, values4)
self.module._delete_device(uuid)
count = self.__get_table_count('data4', uuid)
self.assertEqual(count, 0, 'Device data should be deleted')
count = self.__get_table_count('devices', uuid)
self.assertEqual(count, 0, 'Device should be deleted')
def test_delete_device_data_invalid_parameters(self):
self.init()
uuid = '123-456-789'
with self.assertRaises(MissingParameter) as cm:
self.module._delete_device(None)
self.assertEqual(cm.exception.message, 'Parameter "device_uuid" is missing', 'Uuid should not be None')
with self.assertRaises(MissingParameter) as cm:
self.module._delete_device('')
self.assertEqual(cm.exception.message, 'Parameter "device_uuid" is missing', 'Uuid should not be None')
def test_event_received(self):
self.init()
uuid = '123-456-789'
event = {'event':'test.test.test', 'params':{}, 'startup':False, 'device_id':uuid, 'from':'test'}
fake_event = FakeEvent([{'field':'test', 'value':666}])
self.module.events_broker.get_event_instance = Mock(return_value=fake_event)
count = self.__get_table_count('data1', uuid)
self.assertEqual(count, 0, 'Data1 should be empty')
self.module.event_received(event)
self.assertEqual(self.module.events_broker.get_event_instance.call_count, 1, 'Get_event_instance should be called')
count = self.__get_table_count('data1', uuid)
self.assertEqual(count, 1, 'Data1 should contain single record')
def test_event_received_delete_device(self):
self.init()
uuid = '123-456-789'
event = {'event':'system.device.delete', 'params':{}, 'startup':False, 'device_id':uuid, 'from':'test'}
self.module._delete_device = Mock()
self.module.event_received(event)
self.assertEqual(self.module._delete_device.call_count, 1, '_delete_device should be called')
def test_event_received_event_not_found(self):
self.init()
uuid = '123-456-789'
event = {'event':'test.test.test', 'params':{}, 'startup':False, 'device_id':uuid, 'from':'test'}
self.module.events_broker.get_event_instance = Mock(return_value=None)
self.module.event_received(event)
self.assertEqual(self.module.events_broker.get_event_instance.call_count, 1, 'Get_event_instance should be called')
count = self.__get_table_count('data1', uuid)
self.assertEqual(count, 0, 'Data1 should be empty')
def test_event_received_no_chart_value(self):
self.init()
uuid = '123-456-789'
event = {'event':'test.test.test', 'params':{}, 'startup':False, 'device_id':uuid, 'from':'test'}
fake_event = FakeEvent(None)
self.module.events_broker.get_event_instance = Mock(return_value=fake_event)
self.module.event_received(event)
self.assertEqual(self.module.events_broker.get_event_instance.call_count, 1, 'Get_event_instance should be called')
count = self.__get_table_count('data1', uuid)
self.assertEqual(count, 0, 'Data1 should be empty')
def test_event_received_invalid_chart_value(self):
self.init()
uuid = '123-456-789'
event = {'event':'test.test.test', 'params':{}, 'startup':False, 'device_id':uuid, 'from':'test'}
fake_event = FakeEvent({})
self.module.events_broker.get_event_instance = Mock(return_value=fake_event)
self.module.event_received(event)
count = self.__get_table_count('data1', uuid)
self.assertEqual(count, 0, 'Data1 should be empty')
fake_event = FakeEvent(666)
self.module.events_broker.get_event_instance = Mock(return_value=fake_event)
self.module.event_received(event)
count = self.__get_table_count('data1', uuid)
self.assertEqual(count, 0, 'Data1 should be empty')
fake_event = FakeEvent('evil')
self.module.events_broker.get_event_instance = Mock(return_value=fake_event)
self.module.event_received(event)
count = self.__get_table_count('data1', uuid)
self.assertEqual(count, 0, 'Data1 should be empty')
def test_event_received_single_true_value(self):
self.init()
uuid = '123-456-789'
event = {'event':'test.test.test', 'params':{}, 'startup':False, 'device_id':uuid, 'from':'test'}
fake_event = FakeEvent([{'field':'test', 'value':True}])
self.module.events_broker.get_event_instance = Mock(return_value=fake_event)
self.module.event_received(event)
self.assertEqual(self.module.events_broker.get_event_instance.call_count, 1, 'Get_event_instance should be called')
count = self.__get_table_count('data1', uuid)
self.assertEqual(count, 2, 'Data1 should contain 2 records')
rows = self.__get_table_rows('data1')
self.assertEqual(rows[0][3], 0, '0 value should be inserted before real value')
self.assertEqual(rows[1][3], 1, '1 value should be inserted instead of real value')
def test_event_received_single_false_value(self):
self.init()
uuid = '123-456-789'
event = {'event':'test.test.test', 'params':{}, 'startup':False, 'device_id':uuid, 'from':'test'}
fake_event = FakeEvent([{'field':'test', 'value':False}])
self.module.events_broker.get_event_instance = Mock(return_value=fake_event)
self.module.event_received(event)
self.assertEqual(self.module.events_broker.get_event_instance.call_count, 1, 'Get_event_instance should be called')
count = self.__get_table_count('data1', uuid)
self.assertEqual(count, 2, 'Data1 should contain 2 records')
rows = self.__get_table_rows('data1')
self.assertEqual(rows[0][3], 1, '1 value should be inserted before real value')
self.assertEqual(rows[1][3], 0, '0 value should be inserted instead of real value')
if __name__ == "__main__":
# coverage run --omit="*lib/python*/*","test_*" --concurrency=thread test_charts.py; coverage report -m -i
unittest.main()
|
py | 1a36d1cb2f3c4be68593308e2c9f2e9f3708d09b | #!/usr/bin/env python
# encoding: utf-8
"""
@version: python3.7
@author: JYFelt
@license: Apache Licence
@contact: [email protected]
@site: https://blog.csdn.net/weixin_38034182
@software: PyCharm
@file: ggplot_demo.py
@time: 2019/8/15 17:16
"""
from ggplot import *
p = ggplot(mtcars, aes('mpg', 'wt', color='factor(cyl)')) + geom_point() + ggtitle('mtcars')
print(p)
|
py | 1a36d28ca1bddfb6ebb411fae16e8d2f8c6ba862 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo.core import metrics
from lingvo.core import test_utils
from six.moves import range
import tensorflow as tf
class MetricsTest(test_utils.TestCase):
def testAverageMetric(self):
m = metrics.AverageMetric()
m.Update(1.0)
m.Update(2.0, 10.0)
self.assertEqual(1.0 + 2.0*10.0, m.total_value)
expected_average = (1.0 + 2.0*10.0) / (1.0 + 10.0)
self.assertEqual(expected_average, m.value)
name = 'metric_name'
self.assertEqual(
tf.Summary(value=[tf.Summary.Value(tag=name,
simple_value=expected_average)]),
m.Summary(name))
# Calling m.Summary() does not reset statistics.
m.Update(1.0)
self.assertEqual(1.0 + 2.0*10.0 + 1.0, m.total_value)
def testF1Metric(self):
m = metrics.F1Metric()
m.UpdateTruePositive(count=2.0)
m.UpdateFalsePositive()
m.UpdateFalseNegative()
precision = 2.0 / 3.0
recall = 2.0 / 3.0
expected_f1 = 2 * precision * recall / (precision + recall)
self.assertAlmostEqual(expected_f1, m.value)
name = 'my_f1_metric'
self.assertEqual(
tf.Summary(value=[tf.Summary.Value(tag=name,
simple_value=expected_f1)]),
m.Summary(name))
def testCorpusBleuMetric(self):
m = metrics.CorpusBleuMetric()
m.Update('a b c d', 'a b c d')
m.Update('a b c', 'a b c')
self.assertEqual(1.0, m.value)
name = 'corpus_bleu'
self.assertEqual(
tf.Summary(value=[tf.Summary.Value(tag=name, simple_value=1.0)]),
m.Summary(name))
if __name__ == '__main__':
tf.test.main()
|
py | 1a36d33334636f81d1e6f07e0883fcd60d66f127 | """tango_with_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls import include
from rango import views
urlpatterns = [
url(r'^$', views.index, name='index'),
# URLs starting with rango/ are forwarded to rango app
url(r'^rango/', include('rango.urls')),
url(r'^admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) # tell Django to serve static content from MEDIA_URL
|
py | 1a36d3b53b059f6e775eab66c6336e023aff371e | import logging
import sh
from cached_property import cached_property
from kubeyard.base_command import CommandException
from kubeyard.commands.devel import BaseDevelCommand
logger = logging.getLogger(__name__)
class ShellCommand(BaseDevelCommand):
"""
Command allows you to exec into container.
"""
custom_script_name = 'shell'
context_vars = ['pod', 'shell']
def __init__(self, *, shell, pod, container, root, **kwargs):
super().__init__(**kwargs)
self.shell = shell
self.pod = pod
self.container = container
self.root = root
def run_default(self):
try:
sh.kubectl.exec(
"-it",
self.pod_name,
"-c", self.container_name,
'--',
self.shell,
"-c", self.before_command,
_fg=True,
)
except sh.ErrorReturnCode_130:
# Command exited using Ctrl+D or Ctrl+C
pass
finally:
if self.after_command:
sh.kubectl.exec(
self.pod_name,
"-c", self.container_name,
"--",
self.shell,
"-c", self.after_command,
)
@cached_property
def pod_name(self) -> str:
if self.pod:
all_pods = sh.kubectl.get.pods('-o', 'jsonpath={.items[*].metadata.name}').split()
# Exact match
if self.pod in all_pods:
return self.pod
# Starting-with match
pods = [pod for pod in all_pods if pod.startswith(self.pod)]
pods.sort(key=len)
if len(pods) == 0:
raise CommandException(f"Not found pod equal or starting with '{self.pod}'")
if len(pods) > 1:
logger.warning(f"Found more than one pod. Using '{pods[0]}'")
return pods[0]
else:
for pod in sh.kubectl.get.pods(_iter='out'):
if self.image_name in pod:
return pod.split()[0]
raise CommandException("Container not found, please specify container or fix project setup.")
@cached_property
def container_name(self) -> str:
if self.container:
return self.container
else:
return self.image_name
@cached_property
def username(self) -> str:
return str(sh.whoami()).strip()
@property
def before_command(self):
if self.root:
return self.shell
return (
'groupadd -f -g {gid} {username}; '
'adduser -q --gecos "" --disabled-password --uid {uid} --gid {gid} {username}; '
'su {username}; '
).format(
gid=self.gid,
uid=self.uid,
username=self.username,
)
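    # For uid=1000, gid=1000 and username="alice" (illustrative values, not from
    # the original source), the property above expands to:
    #   groupadd -f -g 1000 alice; adduser -q --gecos "" --disabled-password
    #   --uid 1000 --gid 1000 alice; su alice;
    # so the shell inside the container runs as the invoking host user.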
@property
def after_command(self) -> str:
if self.root:
return ""
else:
return "userdel --remove {username}; ".format(username=self.username)
|
py | 1a36d3b9864d73d8a0accdd398741213b08377a1 | #######################################################################
#
# Push Service for Enigma-2
# Coded by betonme (c) 2012 <glaserfrank(at)gmail.com>
# Support: http://www.i-have-a-dreambox.com/wbb2/thread.php?threadid=167779
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#######################################################################
# Config
from Components.config import ConfigYesNo, ConfigText, ConfigNumber, NoSave
# Plugin internal
from Plugins.Extensions.PushService.ControllerBase import ControllerBase
# Plugin specific
import os
# Constants
SUBJECT = _("List of Files")
class ListDir(ControllerBase):
ForceSingleInstance = True
def __init__(self):
# Is called on instance creation
ControllerBase.__init__(self)
        self.movielist = []
# Default configuration
self.setOption( 'path', NoSave(ConfigText( default = "/media/hdd/movie/", fixed_size = False )), _("Where to check") )
self.setOption( 'ext', NoSave(ConfigText( default = ".ts", fixed_size = False )), _("file extension") )
def run(self, callback, errback):
# At the end a plugin has to call one of the functions: callback or errback
        # Callback should be called with at least one of the parameters: header, body (list of files)
# If empty or none is returned, nothing will be sent
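        # Illustrative outcome (an assumption, not from the original plugin): if
        # two matching files A.ts and B.ts are found, this method ends with
        #   callback(SUBJECT, "The following files were found: \nA.ts\nB.ts")
        # and with no matches it simply calls callback(), so nothing is sent.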
path = self.getValue('path')
ext = self.getValue('ext')
movielist = []
for file in os.listdir( path ):
if file.endswith( ext ):
movielist.append(file)
body = "The following files were found: \n" + "\n".join(movielist)
if movielist:
callback( SUBJECT, body )
else:
callback()
|
py | 1a36d3fabeb8ba0ca61434e5991ff65b2d23d712 | from re_test_patterns import test_patterns
test_patterns(
'This is some text -- with punctuation.',
[(r'^\w+', 'word at start of string'),
(r'\A\w+', 'word at start of string'),
(r'\w+\S*$', 'word near end of string'),
(r'\w+\S*\Z', 'word near end of string'),
(r'\w*t\w*', 'word containing t'),
(r'\bt\w+', 't at start of word'),
(r'\w+t\b', 't at end of word'),
(r'\Bt\B', 't, not start or end of word')],
)
|
py | 1a36d4ba5e0d04d95650d4ccfc883ec567bb790f | ## First, some preliminaries that will be needed.
import hashlib
from paillier import *
from primes import *
print( "Generating keypair...")
pllr_priv_1, pllr_pub_1 = generate_keypair(512)
pllr_priv_2, pllr_pub_2 = generate_keypair(512)
def sha512(s):
return hashlib.sha512(s).digest()
# Base field Z_p
p = 2**255 - 19
def modp_inv(x):
return pow(x, p-2, p)
# Curve constant
d = -121665 * modp_inv(121666) % p
# Group order
q = 2**252 + 27742317777372353535851937790883648493
def sha512_modq(s):
return int.from_bytes(sha512(s), "little") % q
## Then follows functions to perform point operations.
# Points are represented as tuples (X, Y, Z, T) of extended
# coordinates, with x = X/Z, y = Y/Z, x*y = T/Z
def point_add(P, Q):
A, B = (P[1]-P[0]) * (Q[1]-Q[0]) % p, (P[1]+P[0]) * (Q[1]+Q[0]) % p;
C, D = 2 * P[3] * Q[3] * d % p, 2 * P[2] * Q[2] % p;
E, F, G, H = B-A, D-C, D+C, B+A;
return (E*F, G*H, F*G, E*H);
# Computes Q = s * P using double-and-add
def point_mul(s, P):
Q = (0, 1, 1, 0) # Neutral element
while s > 0:
if s & 1:
Q = point_add(Q, P)
P = point_add(P, P)
s >>= 1
return Q
def point_equal(P, Q):
# x1 / z1 == x2 / z2 <==> x1 * z2 == x2 * z1
if (P[0] * Q[2] - Q[0] * P[2]) % p != 0:
return False
if (P[1] * Q[2] - Q[1] * P[2]) % p != 0:
return False
return True
## Now follows functions for point compression.
# Square root of -1
modp_sqrt_m1 = pow(2, (p-1) // 4, p)
# Compute corresponding x-coordinate, with low bit corresponding to
# sign, or return None on failure
def recover_x(y, sign):
if y >= p:
return None
x2 = (y*y-1) * modp_inv(d*y*y+1)
if x2 == 0:
if sign:
return None
else:
return 0
# Compute square root of x2
x = pow(x2, (p+3) // 8, p)
if (x*x - x2) % p != 0:
x = x * modp_sqrt_m1 % p
if (x*x - x2) % p != 0:
return None
if (x & 1) != sign:
x = p - x
return x
# Base point
g_y = 4 * modp_inv(5) % p
g_x = recover_x(g_y, 0)
G = (g_x, g_y, 1, g_x * g_y % p)
def point_compress(P):
zinv = modp_inv(P[2])
x = P[0] * zinv % p
y = P[1] * zinv % p
return int.to_bytes(y | ((x & 1) << 255), 32, "little")
def point_decompress(s):
if len(s) != 32:
raise Exception("Invalid input length for decompression")
y = int.from_bytes(s, "little")
sign = y >> 255
y &= (1 << 255) - 1
x = recover_x(y, sign)
if x is None:
return None
else:
return (x, y, 1, x*y % p)
## These are functions for manipulating the private key.
def secret_expand(secret):
if len(secret) != 32:
raise Exception("Bad size of private key")
h = sha512(secret)
a = int.from_bytes(h[:32], "little")
a &= (1 << 254) - 8
a |= (1 << 254)
return (a, h[32:])
def secret_to_public(secret):
(a, dummy) = secret_expand(secret)
return point_compress(point_mul(a, G))
## The signature function works as below.
def sign(secret, msg):
a, prefix = secret_expand(secret)
A = point_compress(point_mul(a, G))
r = sha512_modq(prefix + msg)
R = point_mul(r, G)
Rs = point_compress(R)
h = sha512_modq(Rs + A + msg)
s = (r + h * a) % q
return Rs + int.to_bytes(s, 32, "little")
## And finally the verification function.
def verify(public, msg, signature, Ar=None):
if len(public) != 32:
raise Exception("Bad public key length")
if len(signature) != 64:
Exception("Bad signature length")
A = point_decompress(public)
if not A:
return False
Rs = signature[:32]
R = point_decompress(Rs)
if not R:
return False
s = int.from_bytes(signature[32:], "little")
if s >= q: return False
h = sha512_modq(Rs + public + msg)
sB = point_mul(s, G)
# A = point_add(A, (0,-1, -1, 0))
if Ar is None:
hA = point_mul(h, A)
else:
print("Ar")
hA = point_mul(h, point_decompress(Ar))
return point_equal(sB, point_add(R, hA))
from binascii import unhexlify, hexlify
secret1 = unhexlify("ce266dfb7c193ac833c16252a30b74bf0384051c769f602c1d7f7b6c81526bbc") # naturally has to become true random
secret2 = unhexlify("c171e7f9b32dc26571ee54e026aabccdba48272384e2493436a85b6b6c713642") # naturally has to become true random
msg = b"Hello"
# Priv, Public
a1, prefix1 = secret_expand(secret1)
a2, prefix2 = secret_expand(secret2)
#amul = a1 + a2
A1 = point_mul(a1, G)
A2 = point_mul(a2, G)
A = point_compress(point_add(A1, A2))
#AMUL = point_mul(a1*a2, G)
#AMPC = point_compress(AMUL)
# Random
r1 = int("de266dfb7c193ac833c16252a30b74bf0384051c76e24934367f7b6c81526bbc", 16) # naturally has to become true random
r2 = int("d171e7f9b3193ac833c164e026aabccdba48272384e2493436a85b6b6c713642", 16) # naturally has to become true random
r = r1 + r2
assert r1 != 0%q and r1 != 1%q
assert r2 != 0%q and r2 != 1%q
R1 = point_mul(r1, G)
R2 = point_mul(r2, G)
Rs = point_compress(point_add(R1, R2))
h = sha512_modq(Rs + A + msg)
s1 = (r1 + h*a1) % q
s2 = (r2 + h*a2) % q
s = s1 + s2
smul = ((r1+r2) + (h * (a1 + a2))) % q
print(" ")
print("smul is : ", hex(smul))
print(" ")
sig = Rs + int.to_bytes(s1, 32, "little")
# A = A - A2
Ad = point_decompress(A)
T = point_add((0,1,1,0) , A2)
invT = (T[0], -T[1], -T[2], T[3])
Ar = point_compress(point_add(Ad, invT))
print(verify(A, msg, sig, Ar))
print(hexlify(sig))
print("/n/n")
''' P_1 shares ckey = Enc(d_1) with P_2 '''
# unbound: s2 = r2 + (a2 * h) % q
ckey1 = r2 + (a2 * h) % q
ckey1 = encrypt(pllr_pub_1, ckey1)
''' ckey2 stays a scalar. Therefore, no encrypt '''
# unbound: s1 = r1 + (a1 * h) % q
ckey2 = r1 + (a1 * h) % q
''' '''
ciphertext = e_add_const(pllr_pub_1, ckey1, ckey2) # s = s1 + s2
print("ciphertext = ", ciphertext)
plaintext = decrypt(pllr_priv_1, pllr_pub_1, ciphertext) % q
print("plaintext = ", plaintext)
print(" ")
print(hex(plaintext))
'''
The multiplicative version used for Weierstrass curves is not directly
transferable to Edwards curves, but we can work from the additive or
collective version.
It seems that unbound is doing something similar.
Given a message m that both parties agree to sign, the parties can generate a signature on that message using the protocol of [10] as follows:
Alice and Bob choose random shares r1 and r2, respectively, and learn the value R = r1 ⋅ G + r2 ⋅ G.
This generation uses commitments in order to ensure that R is (essentially) uniformly distributed in
the group, in the case that one of the parties is corrupted.
Each party locally computes e = H( R, Q, m ).
Bob computes s2 = r2 + x2 ⋅ e mod q and sends s2 to Alice.
Alice computes s1 = r1 + x1 ⋅ e mod q and s = s1 + s2 mod q, and outputs ( R, s ).
Observe that s = s1 + s2 = ( r1 + r2 ) + ( x1 + x2 ) ⋅ e = r + x ⋅ e mod q, as required.
Instead of following the Unbound proposal, we have followed the aggregate
collective signing approach for Edwards curves, which works well.
We just have to split R into r1 and r2 to avoid malicious co-signers.
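As a minimal sketch (the helper names below are illustrative assumptions and
not part of this script), each co-signer's contribution and the final
assembly of the signature amount to:

    def partial_sign(r_i, a_i, h):
        # s_i = r_i + h * a_i (mod q)
        return (r_i + h * a_i) % q

    def assemble(Rs, shares):
        # R (compressed) followed by s = sum of shares (mod q), little-endian
        return Rs + int.to_bytes(sum(shares) % q, 32, "little")

Feeding (r1, a1, h) and (r2, a2, h) into partial_sign reproduces the smul
value computed above, and the assembled signature should verify against the
combined public key A.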
''' |
py | 1a36d5e80636fd777697ac035bcf47f7e1c413aa | '''
Gramex {__version__} Copyright (c) 2017 by Gramener
Start the Gramex server on port 9988 at the current directory.
If no gramex.yaml exists, show the guide (https://learn.gramener.com/guide/)
Options
--listen.port=9090 Starts Gramex at port 9090
--browser Open the browser after startup
--settings.debug Enable serving tracebacks and autoreload
--settings.xsrf_cookies=false Disable XSRF cookies (only for testing)
--settings.cookie_secret=... Change cookie encryption key
Helper applications
gramex init Add Gramex project scaffolding to current dir
gramex service Windows service setup
gramex mail Send email from command line
gramex license See Gramex license, accept or reject it
Installation commands. Run without arguments to see help
gramex install Install an app
gramex update Update an app
gramex setup Run make, npm install, bower install etc on app
gramex run Run an installed app
gramex uninstall Uninstall an app
'''
import os
import sys
import json
import yaml
import logging
import logging.config
import tornado.ioloop
from pathlib import Path
from orderedattrdict import AttrDict
from gramex.config import ChainConfig, PathConfig, app_log, variables, setup_variables
from gramex.config import ioloop_running, prune_keys, setup_secrets
paths = AttrDict() # Paths where configurations are stored
conf = AttrDict() # Final merged configurations
config_layers = ChainConfig() # Loads all configurations. init() updates it
paths['source'] = Path(__file__).absolute().parent # Where gramex source code is
paths['base'] = Path('.') # Where gramex is run from
callbacks = {} # Services callbacks
# Populate __version__ from release.json
with (paths['source'] / 'release.json').open() as _release_file:
release = json.load(_release_file, object_pairs_hook=AttrDict)
__version__ = release.info.version
_sys_path = list(sys.path) # Preserve original sys.path
# List of URLs to warn about in case of duplicates
PathConfig.duplicate_warn = [
'url.*',
'cache.*',
'schedule.*',
'watch.*',
'email.*',
'alert.*',
'sms.*',
'log.loggers.*', 'log.handlers.*', 'log.formatters.*',
]
def parse_command_line(commands):
'''
Parse command line arguments. For example:
gramex cmd1 cmd2 --a=1 2 -b x --c --p.q=4
returns:
{"_": ["cmd1", "cmd2"], "a": [1, 2], "b": "x", "c": True, "p": {"q": [4]}}
Values are parsed as YAML. Arguments with '.' are split into subgroups. For
example, ``gramex --listen.port 80`` returns ``{"listen": {"port": 80}}``.
'''
group = '_'
args = AttrDict({group: []})
for arg in commands:
if arg.startswith('-'):
group, value = arg.lstrip('-'), 'True'
if '=' in group:
group, value = group.split('=', 1)
else:
value = arg
value = yaml.safe_load(value)
base = args
keys = group.split('.')
for key in keys[:-1]:
base = base.setdefault(key, AttrDict())
# Add the key to the base.
# If it's already there, make it a list.
# If it's already a list, append to it.
if keys[-1] not in base or base[keys[-1]] is True:
base[keys[-1]] = value
elif not isinstance(base[keys[-1]], list):
base[keys[-1]] = [base[keys[-1]], value]
else:
base[keys[-1]].append(value)
return args
def callback_commandline(commands):
'''
Find what method should be run based on the command line programs. This
refactoring allows us to test gramex.commandline() to see if it processes
the command line correctly, without actually running the commands.
Returns a callback method and kwargs for the callback method.
'''
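    # Illustrative behaviour (an assumption, not from the original source):
    #   callback_commandline(['--help'])       -> (console, {'msg': <usage text>})
    #   callback_commandline(['install', 'x']) -> (gramex.install.install,
    #                                              {'cmd': ['x'], 'args': {...}})
    #   callback_commandline([])               -> (init, ...) when a gramex.yaml
    #                                              exists in the current directory.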
# Set logging config at startup. (Services may override this.)
log_config = (+PathConfig(paths['source'] / 'gramex.yaml')).get('log', AttrDict())
log_config.root.level = logging.INFO
from . import services
services.log(log_config)
# args has all optional command line args as a dict of values / lists.
# cmd has all positional arguments as a list.
args = parse_command_line(commands)
cmd = args.pop('_')
# If --help or -V --version is specified, print a message and end
if args.get('V') is True or args.get('version') is True:
return console, {'msg': 'Gramex %s' % __version__}
if args.get('help') is True:
return console, {'msg': __doc__.strip().format(**globals())}
# Any positional argument is treated as a gramex command
if len(cmd) > 0:
kwargs = {'cmd': cmd, 'args': args}
base_command = cmd.pop(0).lower()
method = 'install' if base_command == 'update' else base_command
if method in {
'install', 'uninstall', 'setup', 'run', 'service', 'init',
'mail', 'license',
}:
import gramex.install
return getattr(gramex.install, method), kwargs
raise NotImplementedError('Unknown gramex command: %s' % base_command)
# Use current dir as base (where gramex is run from) if there's a gramex.yaml.
if not os.path.isfile('gramex.yaml'):
return console, {'msg': 'No gramex.yaml. See https://learn.gramener.com/guide/'}
app_log.info('Gramex %s | %s | Python %s', __version__, os.getcwd(),
sys.version.replace('\n', ' '))
return init, {'cmd': AttrDict(app=args)}
def commandline(args=None):
'''
Run Gramex from the command line. Called via:
- setup.py console_scripts when running gramex
- __main__.py when running python -m gramex
'''
callback, kwargs = callback_commandline(sys.argv[1:] if args is None else args)
callback(**kwargs)
def gramex_update(url):
'''If a newer version of gramex is available, logs a warning'''
import time
import requests
import platform
from . import services
if not services.info.eventlog:
return app_log.error('eventlog: service is not running. So Gramex update is disabled')
query = services.info.eventlog.query
update = query('SELECT * FROM events WHERE event="update" ORDER BY time DESC LIMIT 1')
delay = 24 * 60 * 60 # Wait for one day before updates
if update and time.time() < update[0]['time'] + delay:
return app_log.debug('Gramex update ran recently. Deferring check.')
meta = {
'dir': variables.get('GRAMEXDATA'),
'uname': platform.uname(),
}
if update:
events = query('SELECT * FROM events WHERE time > ? ORDER BY time',
(update[0]['time'], ))
else:
events = query('SELECT * FROM events')
logs = [dict(log, **meta) for log in events]
r = requests.post(url, data=json.dumps(logs))
r.raise_for_status()
update = r.json()
version = update['version']
if version > __version__:
app_log.error('Gramex %s is available. See https://learn.gramener.com/guide/', version)
elif version < __version__:
app_log.warning('Gramex update: your version %s is ahead of the stable %s',
__version__, version)
else:
app_log.debug('Gramex version %s is up to date', __version__)
services.info.eventlog.add('update', update)
return {'logs': logs, 'response': update}
def console(msg):
'''Write message to console'''
print(msg) # noqa
def init(force_reload=False, **kwargs):
'''
Update Gramex configurations and start / restart the instance.
``gramex.init()`` can be called any time to refresh configuration files.
``gramex.init(key=val)`` adds ``val`` as a configuration layer named
``key``. If ``val`` is a Path, it is converted into a PathConfig. (If it is
Path directory, use ``gramex.yaml``.)
Services are re-initialised if their configurations have changed. Service
callbacks are always re-run (even if the configuration hasn't changed.)
'''
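    # Illustrative usage (an assumption, not from the original source):
    #   gramex.init(custom=Path('/app/config'))
    # registers /app/config/gramex.yaml as an additional configuration layer
    # named "custom"; services whose merged configuration changed are
    # re-initialised, and their callbacks are re-run.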
try:
setup_secrets(paths['base'])
except Exception as e:
app_log.exception(e)
# Reset variables
variables.clear()
variables.update(setup_variables())
# Initialise configuration layers with provided configurations
# AttrDicts are updated as-is. Paths are converted to PathConfig
paths.update(kwargs)
for key, val in paths.items():
if isinstance(val, Path):
if val.is_dir():
val = val / 'gramex.yaml'
val = PathConfig(val)
config_layers[key] = val
# Locate all config files
config_files = set()
for path_config in config_layers.values():
if hasattr(path_config, '__info__'):
for pathinfo in path_config.__info__.imports:
config_files.add(pathinfo.path)
config_files = list(config_files)
# Add config file folders to sys.path
sys.path[:] = _sys_path + [str(path.absolute().parent) for path in config_files]
from . import services
globals()['service'] = services.info # gramex.service = gramex.services.info
# Override final configurations
final_config = +config_layers
    # --settings.debug => log.root.level = DEBUG
if final_config.app.get('settings', {}).get('debug', False):
final_config.log.root.level = logging.DEBUG
# Set up a watch on config files (including imported files)
if final_config.app.get('watch', True):
        from .services import watcher
        watcher.watch('gramex-reconfig', paths=config_files, on_modified=lambda event: init())
# Run all valid services. (The "+" before config_chain merges the chain)
# Services may return callbacks to be run at the end
for key, val in final_config.items():
if key not in conf or conf[key] != val or force_reload:
if hasattr(services, key):
app_log.debug('Loading service: %s', key)
conf[key] = prune_keys(val, {'comment'})
callback = getattr(services, key)(conf[key])
if callable(callback):
callbacks[key] = callback
else:
app_log.error('No service named %s', key)
# Run the callbacks. Specifically, the app service starts the Tornado ioloop
for key in (+config_layers).keys():
if key in callbacks:
app_log.debug('Running callback: %s', key)
callbacks[key]()
def shutdown():
'''Shut down this instance'''
ioloop = tornado.ioloop.IOLoop.current()
if ioloop_running(ioloop):
app_log.info('Shutting down Gramex...')
ioloop.stop()
def log(*args, **kwargs):
'''
Logs structured information for future reference. Typical usage::
gramex.log(level='INFO', x=1, msg='abc')
This logs ``{level: INFO, x: 1, msg: abc}`` into a logging queue. If a `gramexlog` service like
ElasticSearch has been configured, it will periodically flush the logs into the server.
'''
from . import services
# gramexlog() positional arguments may have a handler and app (in any order)
# The app defaults to the first gramexlog:
handler, app = None, services.info.gramexlog.get('defaultapp', None)
for arg in args:
# Pretend that anything that has a .args is a handler
if hasattr(getattr(arg, 'args', None), 'items'):
handler = arg
# ... and anything that's a string is an index name. The last string overrides all
elif isinstance(arg, str):
app = arg
# If the user logs into an unknown app, stop immediately
try:
conf = services.info.gramexlog.apps[app]
except KeyError:
raise ValueError(f'gramexlog: no config for {app}')
# Add all URL query parameters. In case of multiple values, capture the last
if handler:
kwargs.update({key: val[-1] for key, val in handler.args.items()})
# Add additional keys specified in gramex.yaml via keys:
kwargs.update(conf.extra_keys(handler))
conf.queue.append(kwargs)
|
py | 1a36d7f550c5302b274e2d62d6c9c7c6d6af3c23 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: 2018-02-02 12:31:13
import logging
import numpy as np
from pybpodapi.protocol import Bpod, StateMachine
import task_settings
import user_settings
from iblrig.bpod_helper import BpodMessageCreator
from session_params import SessionParamHandler
from trial_params import TrialParamHandler
log = logging.getLogger("iblrig")
log.setLevel(logging.INFO)
global sph
sph = SessionParamHandler(task_settings, user_settings)
def softcode_handler(data):
"""
    Soft codes should work with reasonable latency, considering our limiting
    factor is the refresh rate of the screen, which should be 16.667 ms at a
    frame rate of 60 Hz
1 : go_tone
2 : white_noise
"""
global sph
if data == 0:
sph.stop_sound()
elif data == 1:
sph.play_tone()
elif data == 3:
sph.start_camera_recording()
# =============================================================================
# CONNECT TO BPOD
# =============================================================================
bpod = Bpod()
# Soft code handler function can run arbitrary code from within state machine
bpod.softcode_handler_function = softcode_handler
# TODO: Put inside SPH remove @property or organize sequence of var definition
# Bpod message creator
msg = BpodMessageCreator(bpod)
bonsai_hide_stim = msg.bonsai_hide_stim()
bonsai_show_stim = msg.bonsai_show_stim()
sc_play_tone = msg.sound_card_play_idx(sph.GO_TONE_IDX)
sph.GO_TONE_SM_TRIGGER = sc_play_tone
bpod = msg.return_bpod()
# =============================================================================
# TRIAL PARAMETERS AND STATE MACHINE
# =============================================================================
global tph
tph = TrialParamHandler(sph)
bad_stim_count = 0
bad_tone_count = 0
for i in range(sph.NTRIALS): # Main loop
tph.next_trial()
log.info(f"Starting trial: {i + 1}")
# =============================================================================
# Start state machine definition
# =============================================================================
sma = StateMachine(bpod)
if i == 0:
sma.add_state(
state_name="stim_on",
state_timer=10,
state_change_conditions={
"Tup": "bad_stim",
"BNC1High": "stim_off",
"BNC1Low": "stim_off",
},
output_actions=[("Serial1", bonsai_show_stim)],
)
else:
sma.add_state(
state_name="stim_on",
state_timer=1,
state_change_conditions={
"Tup": "bad_stim",
"BNC1High": "stim_off",
"BNC1Low": "stim_off",
},
output_actions=[("Serial1", bonsai_show_stim)],
)
sma.add_state(
state_name="stim_off",
state_timer=1, # Stim off for 1 sec
state_change_conditions={
"Tup": "bad_stim",
"BNC1High": "play_tone",
"BNC1Low": "play_tone",
},
output_actions=[("Serial1", bonsai_hide_stim)],
)
sma.add_state(
state_name="bad_stim",
state_timer=0,
state_change_conditions={"Tup": "play_tone"},
output_actions=[],
)
sma.add_state(
state_name="play_tone",
state_timer=1,
state_change_conditions={
"Tup": "bad_tone",
"BNC2High": "exit",
"BNC2Low": "exit",
},
output_actions=[tph.out_tone],
)
sma.add_state(
state_name="bad_tone",
state_timer=0,
state_change_conditions={"Tup": "exit"},
output_actions=[],
)
# Send state machine description to Bpod device
bpod.send_state_machine(sma)
# Run state machine
if not bpod.run_state_machine(sma): # Locks until state machine 'exit' is reached
break
trial_data = tph.trial_completed(bpod.session.current_trial.export())
bad_tone_state = trial_data["behavior_data"]["States timestamps"]["bad_tone"]
bad_stim_state = trial_data["behavior_data"]["States timestamps"]["bad_stim"]
if not np.all(np.isnan(bad_stim_state)):
bad_stim_count += 1
log.warning(f"Missing stims: {bad_stim_count}")
if not np.all(np.isnan(bad_tone_state)):
bad_tone_count += 1
log.warning(f"Missing tones: {bad_tone_count}")
sph.check_data()
bpod.close()
if __name__ == "__main__":
print("main")
|
py | 1a36d8704916aace6cedf6022c6393490536443a | """
A pseudo model that imports the 'weapon_mastery' dict from the A&I
asset dictionary (/app/assets/abilities_and_impairments.py).
"""
from app.assets import abilities_and_impairments
from app import models
class Assets(models.AssetCollection):
def __init__(self, *args, **kwargs):
self.assets = abilities_and_impairments.weapon_mastery
self.type_override = "weapon_mastery"
models.AssetCollection.__init__(self, *args, **kwargs)
|
py | 1a36d9339264b12b059e276cd73ba69a560ce320 | # qubit number=4
# total number=35
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=14
prog.h(input_qubit[3]) # number=22
prog.cx(input_qubit[0],input_qubit[3]) # number=32
prog.x(input_qubit[3]) # number=33
prog.cx(input_qubit[0],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=19
prog.cz(input_qubit[0],input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=21
prog.z(input_qubit[3]) # number=10
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=26
prog.cz(input_qubit[1],input_qubit[0]) # number=27
prog.h(input_qubit[0]) # number=28
prog.z(input_qubit[1]) # number=24
prog.h(input_qubit[0]) # number=29
prog.cz(input_qubit[1],input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=31
prog.h(input_qubit[1]) # number=18
prog.rx(2.8902652413026093,input_qubit[2]) # number=13
prog.y(input_qubit[1]) # number=11
prog.y(input_qubit[1]) # number=12
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2133.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
py | 1a36d9e02af78085269a710f494f7ce60fea4721 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayMerchantIotDeviceBindResponse(AlipayResponse):
def __init__(self):
super(AlipayMerchantIotDeviceBindResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayMerchantIotDeviceBindResponse, self).parse_response_content(response_content)
|
py | 1a36dc0274c038f219261a914eb93795d481c814 | import os
import sys
import mxnet as mx
import logging
def patch_path(path):
return os.path.join(os.path.dirname(__file__), path)
def main():
sys.path.append(patch_path('..'))
logging.basicConfig(level=logging.DEBUG)
data_dir_path = patch_path('data/flowers')
output_dir_path = patch_path('models')
batch_size = 8
epochs = 100
ctx = mx.cpu() # gpu too expensive for my graphics card due to the (224, 224) size, has to switch to cpu
from mxnet_text_to_image.library.dcgan1 import DCGan
from mxnet_text_to_image.data.flowers import get_data_iter
from mxnet_text_to_image.data.flowers_images import get_image_features
train_data = get_data_iter(data_dir_path=data_dir_path,
batch_size=batch_size,
limit=1000,
text_mode='add')
image_feats_dict = get_image_features(data_dir_path=os.path.join(data_dir_path, 'jpg'), model_ctx=ctx,
image_width=224, image_height=224)
gan = DCGan(model_ctx=ctx)
gan.random_input_size = 20 # random input is 20, text input is 300
gan.fit(train_data=train_data, image_feats_dict=image_feats_dict, model_dir_path=output_dir_path,
epochs=epochs, batch_size=batch_size)
if __name__ == '__main__':
main()
|
py | 1a36dc42b40e942de0d9517b906455e24b4de239 | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class ColourBarsTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'color_bar_standard': 'ColorBarStandardType',
'tolerance': 'int',
'time_range_enabled': 'bool',
'start_time': 'float',
'end_time': 'float',
'range_tolerance': 'float',
'time_secs_or_frames': 'SecsOrFramesType',
'not_at_any_other_time': 'bool',
'reject_on_error': 'bool',
'do_correction': 'bool',
'checked': 'bool'
}
attribute_map = {
'color_bar_standard': 'color_bar_standard',
'tolerance': 'tolerance',
'time_range_enabled': 'time_range_enabled',
'start_time': 'start_time',
'end_time': 'end_time',
'range_tolerance': 'range_tolerance',
'time_secs_or_frames': 'time_secs_or_frames',
'not_at_any_other_time': 'not_at_any_other_time',
'reject_on_error': 'reject_on_error',
'do_correction': 'do_correction',
'checked': 'checked'
}
def __init__(self, color_bar_standard=None, tolerance=None, time_range_enabled=None, start_time=None, end_time=None, range_tolerance=None, time_secs_or_frames=None, not_at_any_other_time=None, reject_on_error=None, do_correction=None, checked=None, local_vars_configuration=None): # noqa: E501
"""ColourBarsTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._color_bar_standard = None
self._tolerance = None
self._time_range_enabled = None
self._start_time = None
self._end_time = None
self._range_tolerance = None
self._time_secs_or_frames = None
self._not_at_any_other_time = None
self._reject_on_error = None
self._do_correction = None
self._checked = None
self.discriminator = None
if color_bar_standard is not None:
self.color_bar_standard = color_bar_standard
if tolerance is not None:
self.tolerance = tolerance
if time_range_enabled is not None:
self.time_range_enabled = time_range_enabled
if start_time is not None:
self.start_time = start_time
if end_time is not None:
self.end_time = end_time
if range_tolerance is not None:
self.range_tolerance = range_tolerance
if time_secs_or_frames is not None:
self.time_secs_or_frames = time_secs_or_frames
if not_at_any_other_time is not None:
self.not_at_any_other_time = not_at_any_other_time
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if do_correction is not None:
self.do_correction = do_correction
if checked is not None:
self.checked = checked
@property
def color_bar_standard(self):
"""Gets the color_bar_standard of this ColourBarsTest. # noqa: E501
:return: The color_bar_standard of this ColourBarsTest. # noqa: E501
:rtype: ColorBarStandardType
"""
return self._color_bar_standard
@color_bar_standard.setter
def color_bar_standard(self, color_bar_standard):
"""Sets the color_bar_standard of this ColourBarsTest.
:param color_bar_standard: The color_bar_standard of this ColourBarsTest. # noqa: E501
:type: ColorBarStandardType
"""
self._color_bar_standard = color_bar_standard
@property
def tolerance(self):
"""Gets the tolerance of this ColourBarsTest. # noqa: E501
:return: The tolerance of this ColourBarsTest. # noqa: E501
:rtype: int
"""
return self._tolerance
@tolerance.setter
def tolerance(self, tolerance):
"""Sets the tolerance of this ColourBarsTest.
:param tolerance: The tolerance of this ColourBarsTest. # noqa: E501
:type: int
"""
self._tolerance = tolerance
@property
def time_range_enabled(self):
"""Gets the time_range_enabled of this ColourBarsTest. # noqa: E501
:return: The time_range_enabled of this ColourBarsTest. # noqa: E501
:rtype: bool
"""
return self._time_range_enabled
@time_range_enabled.setter
def time_range_enabled(self, time_range_enabled):
"""Sets the time_range_enabled of this ColourBarsTest.
:param time_range_enabled: The time_range_enabled of this ColourBarsTest. # noqa: E501
:type: bool
"""
self._time_range_enabled = time_range_enabled
@property
def start_time(self):
"""Gets the start_time of this ColourBarsTest. # noqa: E501
:return: The start_time of this ColourBarsTest. # noqa: E501
:rtype: float
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this ColourBarsTest.
:param start_time: The start_time of this ColourBarsTest. # noqa: E501
:type: float
"""
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this ColourBarsTest. # noqa: E501
:return: The end_time of this ColourBarsTest. # noqa: E501
:rtype: float
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this ColourBarsTest.
:param end_time: The end_time of this ColourBarsTest. # noqa: E501
:type: float
"""
self._end_time = end_time
@property
def range_tolerance(self):
"""Gets the range_tolerance of this ColourBarsTest. # noqa: E501
:return: The range_tolerance of this ColourBarsTest. # noqa: E501
:rtype: float
"""
return self._range_tolerance
@range_tolerance.setter
def range_tolerance(self, range_tolerance):
"""Sets the range_tolerance of this ColourBarsTest.
:param range_tolerance: The range_tolerance of this ColourBarsTest. # noqa: E501
:type: float
"""
self._range_tolerance = range_tolerance
@property
def time_secs_or_frames(self):
"""Gets the time_secs_or_frames of this ColourBarsTest. # noqa: E501
:return: The time_secs_or_frames of this ColourBarsTest. # noqa: E501
:rtype: SecsOrFramesType
"""
return self._time_secs_or_frames
@time_secs_or_frames.setter
def time_secs_or_frames(self, time_secs_or_frames):
"""Sets the time_secs_or_frames of this ColourBarsTest.
:param time_secs_or_frames: The time_secs_or_frames of this ColourBarsTest. # noqa: E501
:type: SecsOrFramesType
"""
self._time_secs_or_frames = time_secs_or_frames
@property
def not_at_any_other_time(self):
"""Gets the not_at_any_other_time of this ColourBarsTest. # noqa: E501
:return: The not_at_any_other_time of this ColourBarsTest. # noqa: E501
:rtype: bool
"""
return self._not_at_any_other_time
@not_at_any_other_time.setter
def not_at_any_other_time(self, not_at_any_other_time):
"""Sets the not_at_any_other_time of this ColourBarsTest.
:param not_at_any_other_time: The not_at_any_other_time of this ColourBarsTest. # noqa: E501
:type: bool
"""
self._not_at_any_other_time = not_at_any_other_time
@property
def reject_on_error(self):
"""Gets the reject_on_error of this ColourBarsTest. # noqa: E501
:return: The reject_on_error of this ColourBarsTest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this ColourBarsTest.
:param reject_on_error: The reject_on_error of this ColourBarsTest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def do_correction(self):
"""Gets the do_correction of this ColourBarsTest. # noqa: E501
:return: The do_correction of this ColourBarsTest. # noqa: E501
:rtype: bool
"""
return self._do_correction
@do_correction.setter
def do_correction(self, do_correction):
"""Sets the do_correction of this ColourBarsTest.
:param do_correction: The do_correction of this ColourBarsTest. # noqa: E501
:type: bool
"""
self._do_correction = do_correction
@property
def checked(self):
"""Gets the checked of this ColourBarsTest. # noqa: E501
:return: The checked of this ColourBarsTest. # noqa: E501
:rtype: bool
"""
return self._checked
@checked.setter
def checked(self, checked):
"""Sets the checked of this ColourBarsTest.
:param checked: The checked of this ColourBarsTest. # noqa: E501
:type: bool
"""
self._checked = checked
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ColourBarsTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ColourBarsTest):
return True
return self.to_dict() != other.to_dict()
|
py | 1a36de14eb2846c360de904e9af27e57a1cfd0a6 | from django.db import models
from django.urls import reverse
from django.utils import timezone
from account.models import User
from .extensions.utils import convert_to_jalali
class CategoryManager(models.Manager):
def available(self):
return self.filter(status=True)
def all_children(self, category):
out = []
if not out:
out.append(category)
for child_cat in category.children.all():
out += self.all_children(child_cat)
return out
def available_children(self, category):
out = []
if not out and category.status:
out.append(category)
for child_cat in category.children.all():
if child_cat.status:
out += self.available_children(child_cat)
return out
class Category(models.Model):
title = models.CharField(max_length=200, verbose_name='عنوان')
slug = models.SlugField(max_length=100, unique=True, verbose_name='لینک')
thumbnail = models.ImageField(upload_to='categories/images', verbose_name='تصویر اصلی', blank=True)
parent = models.ForeignKey('self', blank=True, null=True, \
default=None, on_delete=models.SET_NULL, \
related_name='children', verbose_name='پدر')
position = models.SmallIntegerField(verbose_name='اولویت')
status = models.BooleanField(default=True, verbose_name='فعال باشد؟')
objects = CategoryManager()
def __str__(self):
return self.title
class Meta:
verbose_name = 'دستهبندی'
verbose_name_plural = 'دستهبندیها'
ordering = ['parent__id', 'position']
def slug_path(self):
cur_category = self
path_list = [cur_category.slug]
while cur_category.parent:
cur_category = cur_category.parent
path_list.insert(0, cur_category.slug)
return '/'.join(path_list)
class ArticleManager(models.Manager):
def published(self):
return self.filter(status='p')
class Article(models.Model):
STATUS_CHOICES = (
('d', 'پیشنویس'),
('p', 'منتشرشده'),
)
author = models.ForeignKey(User, null=True, on_delete=models.SET_NULL, related_name='articles', verbose_name='نویسنده')
title = models.CharField(max_length=200, verbose_name='عنوان')
slug = models.SlugField(max_length=100, unique=True, verbose_name='لینک')
description = models.TextField(verbose_name='متن')
thumbnail = models.ImageField(upload_to='articles/images', verbose_name='تصویر اصلی')
category = models.ManyToManyField(Category, related_name='articles', verbose_name='دستهبندی')
published = models.DateTimeField(default=timezone.now, verbose_name='زمان انتشار')
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
status = models.CharField(max_length=1, choices=STATUS_CHOICES, verbose_name='وضعیت')
objects = ArticleManager()
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('account:article-list')
class Meta:
verbose_name = 'مطلب'
verbose_name_plural = 'مطالب'
ordering = ['-published']
def jpublished(self):
return convert_to_jalali(self.published)
jpublished.short_description = published.verbose_name |
py | 1a36de2313874032b7ba097dd471b47deee60900 | import sys
import tkinter as tk
import myNotebook as nb
from config import config
import utils
import overlay
this = sys.modules[__name__]
def plugin_start3(plugin_dir):
return plugin_start()
def plugin_start():
"""
Load EliteHIS plugin
"""
#Initialize target coordinates and planet radius
this.targetLat = tk.DoubleVar(value=0)
this.targetLong = tk.DoubleVar(value=0)
this.planetRadius = tk.IntVar(value=0)
print("EliteHIS: loaded")
return "EliteHIS"
def plugin_stop():
"""
Stop EliteHIS plugin
"""
print("EliteHIS: stopped")
def plugin_prefs(parent, cmdr, is_beta):
"""
Create settings dialog
"""
frame = nb.Frame(parent)
nb.Label(frame, text="Latitude").grid(row=0)
nb.Entry(frame, textvariable=this.targetLat).grid(row=0, column=1)
nb.Label(frame, text="Longitude").grid(row=1)
nb.Entry(frame, textvariable=this.targetLong).grid(row=1, column=1)
nb.Label(frame, text="Planet radius (in metres)").grid(row=2)
nb.Entry(frame, textvariable=this.planetRadius).grid(row=2, column=1)
return frame
def dashboard_entry(cmdr, is_beta, entry):
"""
Called when something on the cockpit display changes
"""
#Lat/Long not always in status.json
try:
currentLat = entry["Latitude"]
currentLong = entry["Longitude"]
altitude = entry["Altitude"]
recalculate_info(currentLat, currentLong, altitude)
except KeyError:
pass
def recalculate_info(currentLat, currentLong, altitude):
"""
Recalculate target heading
"""
heading = utils.calc_heading(currentLat, currentLong, this.targetLat.get(), this.targetLong.get())
overlay.show_heading(heading)
if this.planetRadius.get() > 0:
distance = utils.calc_distance(currentLat, currentLong, this.targetLat.get(), this.targetLong.get(), this.planetRadius.get(), altitude)
overlay.show_distance(distance)
|
py | 1a36dea0d2b3c000dce6b00b7f619ccf91e62bd2 | from unittest.mock import Mock, patch
from django.test import TestCase
from data_refinery_common import utils
from data_refinery_common.models import Pipeline
class UtilsTestCase(TestCase):
@patch('data_refinery_common.utils.requests.get')
def test_get_instance_id_cloud(self, mock_get):
"""Test that a request is made and the global value is stored"""
# Ensure utils.INSTANCE_ID hasn't been set yet in case the
# order the tests are run in ever changes
utils.INSTANCE_ID = None
mock_get.return_value = Mock(ok=True)
mock_get.return_value.text = "instance_id"
with self.settings(RUNNING_IN_CLOUD=True):
self.assertEqual(utils.get_instance_id(), "instance_id")
# Ensure that the second call uses the now-set global value.
# (By resetting the mocks, calling it again, and checking that
# the values didn't need to be set again).
mock_get.reset_mock()
utils.get_instance_id()
mock_get.assert_not_called()
def test_get_instance_id_local(self):
"""Test that local is used for instance id."""
# Ensure utils.INSTANCE_ID hasn't been set yet in case the
# order the tests are run in ever changes
utils.INSTANCE_ID = None
with self.settings(RUNNING_IN_CLOUD=False):
self.assertEqual(utils.get_instance_id(), "local")
# Ensure that the second call uses the now-set global value
# by changing what settings would tell it.
with self.settings(RUNNING_IN_CLOUD=True):
self.assertEqual(utils.get_instance_id(), "local")
def test_supported_microarray_platforms(self):
"""Test that supported microarray platforms setting is set correctly."""
supported_microarray_platforms = utils.get_supported_microarray_platforms()
has_equgene11st = False
has_A_AFFY_59 = False
has_GPL23026 = False
has_AGEOD23026 = False
for platform in supported_microarray_platforms:
if platform["platform_accession"] == "equgene11st" and platform["is_brainarray"]:
has_equgene11st = True
if platform["external_accession"] == "A-AFFY-59" and not platform["is_brainarray"]:
has_A_AFFY_59 = True
if platform["external_accession"] == "GPL23026" and not platform["is_brainarray"]:
has_GPL23026 = True
if platform["external_accession"] == "A-GEOD-23026" and not platform["is_brainarray"]:
has_AGEOD23026 = True
self.assertTrue(has_equgene11st)
self.assertTrue(has_A_AFFY_59)
self.assertTrue(has_GPL23026)
self.assertTrue(has_AGEOD23026)
def test_get_internal_microarray_accession(self):
"""Test that supported microarray platforms setting is set correctly."""
self.assertEqual(utils.get_internal_microarray_accession("hgu133a"), "hgu133a")
self.assertEqual(utils.get_internal_microarray_accession("A-AFFY-59"), "soybean")
self.assertEqual(utils.get_internal_microarray_accession("GPL23026"), "Illumina_HumanHT-12_V4.0")
def test_supported_rnaseq_platforms(self):
"""Test that supported RNASeq platforms setting is set correctly."""
self.assertTrue("Illumina HiSeq 1000" in utils.get_supported_rnaseq_platforms())
def test_readable_affymetrix_names(self):
"""Test that the setting for Affymetrix accessions to
human readable names is set correctly."""
readable_platform_names = utils.get_readable_affymetrix_names()
expected_readable_name = "[ChiGene-1_0-st] Affymetrix Chicken Gene 1.0 ST Array"
self.assertTrue(readable_platform_names["chigene10st"] == expected_readable_name)
expected_readable_name = "[Xenopus_laevis] Affymetrix Xenopus laevis Genome Array"
self.assertTrue(readable_platform_names["xenopuslaevis"] == expected_readable_name)
def test_get_normalized_platform(self):
""" Test a particular normaization we need to perform """
self.assertEqual(utils.get_normalized_platform("hugene10stv1"), "hugene10st")
self.assertEqual(utils.get_normalized_platform("hugene10stv2"), "hugene10st")
self.assertEqual(utils.get_normalized_platform("stv1hugene10"), "stv1hugene10")
def test_volume_index(self):
"""Test that supported RNASeq platforms setting is set correctly."""
self.assertEqual(utils.get_volume_index(), "0")
with open('/tmp/VOLUME_INDEX', 'wb') as f:
f.write("123".encode())
self.assertEqual(utils.get_volume_index(path='/tmp/VOLUME_INDEX'), "123")
def test_load_blacklist(self):
blacklist = utils.load_blacklist()
self.assertEqual(len(blacklist), 239449)
def test_queryset_iterator(self):
"""Test that the queryset iterator by using it to actually iterate over a queryset.
Uses Pipeline just because it's easy to init."""
# Page size defaults to 2000, so use something bigger than
# that so there's more than one page.
for i in range(3000):
Pipeline(name=str(i)).save()
pipelines = Pipeline.objects.all()
# Build a list of the names just to do something with the data
# so we know the query actually resolved.
names = []
for pipeline in utils.queryset_iterator(pipelines):
names.append(pipeline.name)
self.assertEqual(len(names), 3000)
|
py | 1a36e025cfd783c53474ff1d5d37ff2fd14f73ae | from osgeo import gdal
import numpy as np
import iterm
import sys
f = sys.argv[1]  # '/home/antor/sat/fMoW-full/train/airport/airport_228/airport_228_4_ms.tif' #test/0001826/0001826_0_ms.tif'
ds = gdal.Open(f)
print(ds.GetMetadata())
def to_uint8_raster(a):
_min, _max = a.min(), a.max()
print(a.shape, _min, _max)
return (255. * (a - _min) / (_max - _min) ).astype(np.uint8)
# loop through each band
for bi in range(ds.RasterCount):
band = ds.GetRasterBand(bi + 1)
# Read this band into a 2D NumPy array
ar = band.ReadAsArray()
#iterm.show_image(to_uint8_raster(ar))
print('Band %d has type %s and shape (%d, %d)'% (bi + 1, ar.dtype, ar.shape[0], ar.shape[1]))
raw = ar.tostring()
|
py | 1a36e0703ac3e6c7cd455958f8808f19a0419842 | # -*- coding: utf-8 -*-
"""Global configuration objects.
This module contains boilerplate configuration objects for storing and loading
configuration state.
"""
from __future__ import division
import os
import numpy as np
import pytoml as toml
import six
class BaseConfig(object):
"""Base class for configuration objects.
String representation yields TOML that should parse back to a dictionary
that will initialize the same configuration object.
"""
def __str__(self):
sanitized = {}
for k, v in six.iteritems(self.__dict__):
if isinstance(v, np.ndarray):
sanitized[k] = v.tolist()
else:
sanitized[k] = v
return toml.dumps(sanitized)
__repr__ = __str__
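# Illustrative round trip (an assumption, not part of the original module):
#
#     cfg = ModelConfig({})                        # all defaults
#     restored = ModelConfig(toml.loads(str(cfg)))
#     assert (restored.input_fov_shape == cfg.input_fov_shape).all()
#
# __str__ sanitizes ndarrays to lists before calling toml.dumps, so the TOML
# text parses back into a settings dict that rebuilds an equivalent config.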
class VolumeConfig(BaseConfig):
"""Configuration for the use of volumes.
Attributes
----------
resolution : sequence or ndarray of float
Resolution to which volumes will be downsampled before processing.
label_downsampling : str
Method for downsampling label masks. One of 'majority' or 'conjunction'.
"""
def __init__(self, settings):
self.resolution = np.array(settings.get('resolution', [1, 1, 1]))
self.label_downsampling = str(settings.get('label_downsampling', 'majority'))
class ModelConfig(BaseConfig):
"""Configuration for non-network aspects of the flood filling model.
Attributes
----------
input_fov_shape : sequence or ndarray of int
Input field of view shape in voxels for each flood filling move.
output_fov_shape : sequence or ndarray of int
Output field of view shape in voxels for each flood filling move. Can
not be larger than ``input_fov_shape``.
output_fov_move_fraction : int
Move size as a fraction of the output field of view shape.
v_true, v_false : float
Soft target values for in-object and out-of-object mask voxels,
respectively.
t_move : float
Threshold mask probability in the move check plane to queue a move
to that position.
t_final : float, optional
Threshold mask probability to produce the final segmentation. Defaults
to ``t_move``.
move_check_thickness : int
Thickness of move check plane in voxels. Setting this greater than 1
is useful to make moves more robust even if the move grid aligns with
missing sections or image artifacts.
move_priority : str
How to prioritize the move queue. Either 'descending' to order by
descending mask probability in the move check plane (default),
'proximity' to prioritize moves minimizing L1 path distance from the
seed, or 'random'.
move_recheck : bool
If true, when moves are retrieved from the queue a cube in the
probability mask will be checked around the move location. If no voxels
in this cube are greater than the move threshold, the move will be
skipped. The cube size is one move step in each direction.
training_subv_shape : sequence or ndarray of int, optional
Shape of the subvolumes used during moving training.
validation_subv_shape : sequence or ndarray of int, optional
Shape of the subvolumes used during training validation.
"""
def __init__(self, settings):
self.input_fov_shape = np.array(settings.get('input_fov_shape', [17, 33, 33]))
self.output_fov_shape = np.array(settings.get('output_fov_shape', [17, 33, 33]))
self.output_fov_move_fraction = int(settings.get('output_fov_move_fraction', 4))
self.v_true = float(settings.get('v_true', 0.95))
self.v_false = float(settings.get('v_false', 0.05))
self.t_move = float(settings.get('t_move', 0.9))
self.t_final = float(settings.get('t_final', self.t_move))
self.move_check_thickness = int(settings.get('move_check_thickness', 1))
self.move_priority = str(settings.get('move_priority', 'descending'))
self.move_recheck = bool(settings.get('move_recheck', True))
self.training_subv_shape = np.array(settings.get('training_subv_shape',
self.input_fov_shape + self.move_step * 2))
self.validation_subv_shape = np.array(settings.get('validation_subv_shape',
self.input_fov_shape + self.move_step * 4))
@property
def move_step(self):
return (self.output_fov_shape - 1) // self.output_fov_move_fraction
def subv_moves(self, shape):
return np.prod((shape - self.input_fov_shape) // self.move_step + 1)
@property
def training_subv_moves(self):
return self.subv_moves(self.training_subv_shape)
@property
def validation_subv_moves(self):
return self.subv_moves(self.validation_subv_shape)
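    # Worked example with the defaults above (illustrative, not from the
    # original source): output_fov_shape [17, 33, 33] with
    # output_fov_move_fraction 4 gives move_step [4, 8, 8]; the default
    # training_subv_shape is input_fov_shape + 2 * move_step = [25, 49, 49],
    # so training_subv_moves = 3 * 3 * 3 = 27, and the default
    # validation_subv_shape [33, 65, 65] yields 5 * 5 * 5 = 125 moves.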
class NetworkConfig(BaseConfig):
"""Configuration for the flood filling network architecture.
Attributes
----------
factory : str
Module and function name for a factory method for creating the flood
filling network. This allows a custom architecture to be provided
without needing to modify diluvian.
transpose : bool
If true, any loaded networks will reverse the order of axes for both
inputs and outputs. Data is assumed to be ZYX row-major, but old
versions of diluvian used XYZ, so this is necessary to load old
networks.
rescale_image : bool
If true, rescale the input image intensity from [0, 1) to [-1, 1).
num_modules : int
Number of convolution modules to use, each module consisting of a skip
link in parallel with ``num_layers_per_module`` convolution layers.
num_layers_per_module : int
Number of layers to use in each organizational module, e.g., the
number of convolution layers in each convolution module or the number
of convolution layers before and after each down- and up-sampling
respectively in a U-Net level.
convolution_dim : sequence or ndarray of int
Shape of the convolution for each layer.
convolution_filters : int
Number of convolution filters for each layer.
convolution_activation : str
Name of the Keras activation function to apply after convolution layers.
convolution_padding : str
Name of the padding mode for convolutions, either 'same' (default) or
'valid'.
initialization : str
Name of the Keras initialization function to use for weight
initialization of all layers.
output_activation : str
Name of the Keras activation function to use for the final network
output.
dropout_probability : float
Probability for dropout layers. If zero, no dropout layers will be
included.
batch_normalization : bool
Whether to apply batch normalization. Note that in included networks
normalization is applied after activation, rather than before as in the
original paper, because this is now more common practice.
unet_depth : int
For U-Net models, the total number of downsampled levels in the network.
unet_downsample_rate : sequence or ndarray of int
The frequency in levels to downsample each axis. For example, a standard
U-Net downsamples all axes at each level, so this value would be all
ones. If data is anisotropic and Z should only be downsampled every
other level, this value could be [2, 1, 1]. Axes set to 0 are never
downsampled.
"""
def __init__(self, settings):
self.factory = str(settings.get('factory'))
self.transpose = bool(settings.get('transpose', False))
self.rescale_image = bool(settings.get('rescale_image', False))
self.num_modules = int(settings.get('num_modules', 8))
self.num_layers_per_module = int(settings.get('num_layers_per_module', 2))
self.convolution_dim = np.array(settings.get('convolution_dim', [3, 3, 3]))
self.convolution_filters = int(settings.get('convolution_filters', 32))
self.convolution_activation = str(settings.get('convolution_activation', 'relu'))
self.convolution_padding = str(settings.get('convolution_padding', 'same'))
self.initialization = str(settings.get('initialization', 'glorot_uniform'))
self.output_activation = str(settings.get('output_activation', 'sigmoid'))
self.dropout_probability = float(settings.get('dropout_probability', 0.0))
self.batch_normalization = bool(settings.get('batch_normalization', False))
self.unet_depth = int(settings.get('unet_depth', 4))
self.unet_downsample_rate = np.array(settings.get('unet_downsample_rate', [1, 1, 1]))
class OptimizerConfig(BaseConfig):
"""Configuration for the network optimizer.
Any settings dict entries passed to this initializer will be added as
configuration attributes and passed to the optimizer initializer as keyword
arguments.
Attributes
----------
klass : str
Class name of the Keras optimizer to use.
loss : str
Name of the Keras loss function to use.
"""
def __init__(self, settings):
for k, v in six.iteritems(settings):
if k != 'klass' and k != 'loss':
setattr(self, k, v)
self.klass = str(settings.get('klass', 'SGD'))
self.loss = str(settings.get('loss', 'binary_crossentropy'))
class TrainingConfig(BaseConfig):
"""Configuration for model training.
Attributes
----------
num_gpus : int
Number of GPUs to use for data-parallelism.
num_workers : int
Number of worker queues to use for generating training data.
gpu_batch_size : int
Per-GPU batch size. The effective batch size will be this times
``num_gpus``.
training_size : int
Number of samples to use for training **from each volume**.
validation_size : int
Number of samples to use for validation **from each volume**.
total_epochs : int
Maximum number of training epochs.
reset_generators : bool
Reset training generators after each epoch, so that the training
examples at each epoch are identical.
fill_factor_bins : sequence of float
Bin boundaries for filling fractions. If provided, sample loss will be
weighted to increase loss contribution from less-frequent bins.
Otherwise all samples are weighted equally.
partitions : dict
Dictionary mapping volume name regexes to a sequence of int indicating
number of volume partitions along each axis. Only one axis should be
greater than 1. Each volume should match at most one regex.
training_partition, validation_partition : dict
Dictionaries mapping volume name regexes to a sequence of int indicating
index of the partitions to use for training and validation,
respectively. Each volume should match at most one regex.
validation_metric : dict
Dictionary with the module and function name of a metric function taking
a true and a predicted region mask ('metric'), a boolean for whether to
threshold the mask for the metric (True) or use the mask and target
probabilities ('threshold'), a string 'min' or 'max' for how to choose
the best validation metric value ('mode'), and optional keyword
arguments passed to the metric function ('args').
patience : int
Number of epochs after the last minimal validation loss to terminate
training.
early_abort_epoch : int
If provided, training will check at the end of this epoch
whether validation loss is less than ``early_abort_loss``. If not,
training will be aborted, and may be restarted with a new seed
depending on CLI options. By default this is disabled.
early_abort_loss : float
See ``early_abort_epoch``.
label_erosion : sequence or ndarray of int
Amount to erode label mask for each training subvolume in each
dimension, in pixels. For example, a value of [0, 1, 1] will result
in erosion with a structuring element of size [1, 3, 3].
relabel_seed_component : bool
Relabel training subvolumes to only include the seeded connected
component.
augment_validation : bool
Whether validation data should also be augmented.
augment_use_both : bool
Whether to sequentially use both the augmented and unaugmented version
of each subvolume.
augment_mirrors : sequence of int
Axes along which to mirror for data augmentation.
augment_permute_axes : sequence of sequence of int
Axis permutations to use for data augmentation.
augment_missing_data : list of dict
List of dictionaries with ``axis`` and ``prob`` keys, indicating
an axis to perform data blanking along, and the probability to blank
each plane in the axis, respectively.
augment_noise : list of dict
List of dictionaries with ``axis``, ``mul`` and ``add`` keys, indicating
an axis to perform independent Gaussian noise augmentation on, and the
standard deviations of 1-mean multiplicative and 0-mean additive noise,
respectively.
augment_contrast : list of dict
List of dictionaries with ``axis``, ``prob``, ``scaling_mean``,
``scaling_std``, ``center_mean`` and ``center_std`` keys. These
specify the probability to alter the contrast of a section, the mean
and standard deviation to draw from a normal distribution to scale
contrast, and the mean and standard deviation to draw from a normal
distribution to move the intensity center multiplicatively.
augment_artifacts : list of dict
List of dictionaries with ``axis``, ``prob`` and ``volume_file``
keys, indicating an axis to perform data artifacting along, the
probability to add artifacts to each plane in the axis, and the
volume configuration file from which to draw artifacts, respectively.
"""
def __init__(self, settings):
self.num_gpus = int(settings.get('num_gpus', 1))
self.num_workers = int(settings.get('num_workers', 4))
self.gpu_batch_size = int(settings.get('gpu_batch_size', 8))
self.batch_size = self.num_gpus * self.gpu_batch_size
self.training_size = int(settings.get('training_size', 256))
self.validation_size = int(settings.get('validation_size', 256))
self.total_epochs = int(settings.get('total_epochs', 100))
self.reset_generators = bool(settings.get('reset_generators', False))
self.fill_factor_bins = settings.get('fill_factor_bins', None)
if self.fill_factor_bins is not None:
self.fill_factor_bins = np.array(self.fill_factor_bins)
self.partitions = settings.get('partitions', {'.*': [2, 1, 1]})
self.training_partition = settings.get('training_partition', {'.*': [0, 0, 0]})
self.validation_partition = settings.get('validation_partition', {'.*': [1, 0, 0]})
self.validation_metric = settings.get(
'validation_metric',
{'metric': 'diluvian.util.binary_f_score', 'threshold': True, 'mode': 'max', 'args': {'beta': 0.5}})
self.patience = int(np.array(settings.get('patience', 10)))
self.early_abort_epoch = settings.get('early_abort_epoch', None)
self.early_abort_loss = settings.get('early_abort_loss', None)
self.label_erosion = np.array(settings.get('label_erosion', [0, 1, 1]), dtype=np.int64)
self.relabel_seed_component = bool(settings.get('relabel_seed_component', False))
self.augment_validation = bool(settings.get('augment_validation', True))
self.augment_use_both = bool(settings.get('augment_use_both', True))
self.augment_mirrors = [int(x) for x in settings.get('augment_mirrors', [0, 1, 2])]
self.augment_permute_axes = settings.get('augment_permute_axes', [[0, 2, 1]])
self.augment_missing_data = settings.get('augment_missing_data', [{'axis': 0, 'prob': 0.01}])
self.augment_noise = settings.get('augment_noise', [{'axis': 0, 'mul': 0.1, 'add': 0.1}])
self.augment_contrast = settings.get(
'augment_contrast',
[{'axis': 0, 'prob': 0.05, 'scaling_mean': 0.5, 'scaling_std': 0.1,
'center_mean': 1.2, 'center_std': 0.2}])
self.augment_artifacts = settings.get('augment_artifacts', [])
class PostprocessingConfig(BaseConfig):
"""Configuration for segmentation processing after flood filling.
Attributes
----------
closing_shape : sequence or ndarray of int
Shape of the structuring element for morphological closing, in voxels.
"""
def __init__(self, settings):
self.closing_shape = settings.get('closing_shape', None)
class Config(object):
"""A complete collection of configuration objects.
Attributes
----------
random_seed : int
Seed for initializing the Python and NumPy random generators.
"""
def __init__(self, settings_collection=None):
if settings_collection is not None:
settings = settings_collection[0].copy()
for s in settings_collection:
for c in s:
if c in settings and isinstance(settings[c], dict):
settings[c].update(s[c])
else:
settings[c] = s[c]
else:
settings = {}
self.volume = VolumeConfig(settings.get('volume', {}))
self.model = ModelConfig(settings.get('model', {}))
self.network = NetworkConfig(settings.get('network', {}))
self.optimizer = OptimizerConfig(settings.get('optimizer', {}))
self.training = TrainingConfig(settings.get('training', {}))
self.postprocessing = PostprocessingConfig(settings.get('postprocessing', {}))
self.random_seed = int(settings.get('random_seed', 0))
def __str__(self):
sanitized = {}
for n, c in six.iteritems(self.__dict__):
if not isinstance(c, BaseConfig):
sanitized[n] = c
continue
sanitized[n] = {}
for k, v in six.iteritems(c.__dict__):
if isinstance(v, np.ndarray):
sanitized[n][k] = v.tolist()
else:
sanitized[n][k] = v
return toml.dumps(sanitized)
def from_toml(self, *filenames):
"""Reinitializes this Config from a list of TOML configuration files.
Existing settings are discarded. When multiple files are provided,
configuration is overridden by later files in the list.
Parameters
----------
filenames : iterable of str
Filenames of TOML configuration files to load.
"""
settings = []
for filename in filenames:
with open(filename, 'rb') as fin:
settings.append(toml.load(fin))
return self.__init__(settings)
def to_toml(self, filename):
with open(filename, 'w') as tomlfile:
tomlfile.write(str(self))
CONFIG = Config()
CONFIG.from_toml(os.path.join(os.path.dirname(__file__), 'conf', 'default.toml'))
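# Example usage (a sketch; 'my_config.toml' is a hypothetical user file whose
# settings override the defaults loaded above, since later files take
# precedence in from_toml):
#
#     >>> CONFIG.from_toml(
#     ...     os.path.join(os.path.dirname(__file__), 'conf', 'default.toml'),
#     ...     'my_config.toml')
#     >>> print(CONFIG)   # dumps the merged configuration as TOML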
|
py | 1a36e0ae0b68038a5b9bb66436c7434e94a12f99 | from functools import wraps
def save_status_to_session(strategy, pipeline_index, *args, **kwargs):
"""Saves current social-auth status to session."""
strategy.session_set('partial_pipeline',
strategy.to_session(pipeline_index + 1,
*args, **kwargs))
def partial(func):
@wraps(func)
def wrapper(strategy, pipeline_index, *args, **kwargs):
values = strategy.to_session(pipeline_index, *args, **kwargs)
strategy.session_set('partial_pipeline', values)
return func(strategy=strategy, pipeline_index=pipeline_index,
*args, **kwargs)
return wrapper
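# Example usage (a sketch; 'require_email' and the redirect target are
# hypothetical, shown only to illustrate how the decorator is applied to a
# pipeline step that pauses the pipeline):
#
#     @partial
#     def require_email(strategy, details, user=None, is_new=False, *args, **kwargs):
#         if is_new and not details.get('email'):
#             # Returning a response pauses the pipeline; its state was
#             # already saved to the session by the wrapper above.
#             return strategy.redirect('/email-required')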
|
py | 1a36e0c4c15d1df02a6dc8afe55378cec29b6440 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from aiida.orm.data import to_aiida_type, BaseType
def _left_operator(func):
def inner(self, other):
l = self.value
if isinstance(other, NumericType):
r = other.value
else:
r = other
return to_aiida_type(func(l, r))
return inner
def _right_operator(func):
def inner(self, other):
assert not isinstance(other, NumericType)
return to_aiida_type(func(self.value, other))
return inner
class NumericType(BaseType):
"""
Specific subclass of :py:class:`aiida.orm.data.BaseType` to store numbers,
overloading common operators (``+``, ``*``, ...)
"""
@_left_operator
def __add__(self, other):
return self + other
@_right_operator
def __radd__(self, other):
return other + self
@_left_operator
def __sub__(self, other):
return self - other
@_right_operator
def __rsub__(self, other):
return other - self
@_left_operator
def __mul__(self, other):
return self * other
@_right_operator
def __rmul__(self, other):
return other * self
@_left_operator
def __pow__(self, power):
return self ** power
@_left_operator
def __lt__(self, other):
return self < other
@_left_operator
def __le__(self, other):
return self <= other
@_left_operator
def __gt__(self, other):
return self > other
@_left_operator
def __ge__(self, other):
return self >= other
@_left_operator
def __mod__(self, other):
return self % other
@_right_operator
def __rmod__(self, other):
return other % self
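# Example (illustrative sketch, assuming a concrete subclass such as AiiDA's
# Float node type):
#
#     >>> Float(3.0) + 2        # dispatched through __add__ above
#     # -> to_aiida_type(5.0), i.e. a new node wrapping the value 5.0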
def __float__(self):
return float(self.value)
def __int__(self):
return int(self.value) |
py | 1a36e0f7eacf93682587fef428ee4c179f47485e | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
import numpy
import os
import torch
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, AutoModel
from benchmark_helper import create_onnxruntime_session, Precision
from gpt2_helper import GPT2ModelNoPastState, PRETRAINED_GPT2_MODELS
from quantize_helper import QuantizeHelper
from huggingface_models import MODEL_CLASSES
logger = logging.getLogger(__name__)
# Workaround: replace torch.triu with a self-defined op, since torch.triu
# cannot be exported to ONNX. See https://github.com/pytorch/pytorch/issues/32968
torch_func = {"triu": torch.triu}
def triu_onnx(x, diagonal=0, out=None):
assert out is None
assert len(x.shape) == 2 and x.size(0) == x.size(1)
torch_triu = torch_func["triu"]
template = torch_triu(torch.ones((1024, 1024), dtype=torch.uint8), diagonal)
mask = template[:x.size(0), :x.size(1)]
return torch.where(mask.bool(), x, torch.zeros_like(x))
def replace_torch_functions():
torch.triu = triu_onnx
def restore_torch_functions():
torch.triu = torch_func["triu"]
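# Typical usage (sketch): keep the ONNX-exportable triu replacement active only
# around the export call, as done in export_onnx_model_from_pt below.
#
#     replace_torch_functions()
#     try:
#         torch.onnx.export(model, args, onnx_model_path)   # illustrative arguments
#     finally:
#         restore_torch_functions()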
def create_onnxruntime_input(vocab_size, batch_size, sequence_length, input_names, data_type=numpy.int64):
input_ids = numpy.random.randint(low=0, high=vocab_size - 1, size=(batch_size, sequence_length), dtype=data_type)
inputs = {'input_ids': input_ids}
if "attention_mask" in input_names:
attention_mask = numpy.ones([batch_size, sequence_length], dtype=data_type)
inputs['attention_mask'] = attention_mask
if "token_type_ids" in input_names:
segment_ids = numpy.zeros([batch_size, sequence_length], dtype=data_type)
inputs['token_type_ids'] = segment_ids
return inputs
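# Example (illustrative, using BERT's vocabulary size of 30522):
#
#     >>> inputs = create_onnxruntime_input(
#     ...     30522, 1, 8, ["input_ids", "attention_mask", "token_type_ids"])
#     >>> {k: v.shape for k, v in inputs.items()}
#     {'input_ids': (1, 8), 'attention_mask': (1, 8), 'token_type_ids': (1, 8)}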
def filter_inputs(inputs, input_names):
remaining_model_inputs = {}
for input_name in input_names:
remaining_model_inputs[input_name] = inputs[input_name]
return remaining_model_inputs
def flatten(inputs):
return [[flatten(i) for i in inputs] if isinstance(inputs, (list, tuple)) else inputs]
def update_flatten_list(inputs, res_list):
for i in inputs:
res_list.append(i) if not isinstance(i, (list, tuple)) else update_flatten_list(i, res_list)
return res_list
def build_dynamic_axes(example_inputs, outputs_flatten):
sequence_length = example_inputs["input_ids"].shape[-1]
dynamic_axes = {key: {0: 'batch_size', 1: 'seq_len'} for key in example_inputs.keys()}
output_names = ['output_' + str(i + 1) for i in range(len(outputs_flatten))]
for i, output_name in enumerate(output_names):
dynamic_axes[output_name] = {0: 'batch_size'}
dims = outputs_flatten[i].shape
for j, dim in enumerate(dims):
if dim == sequence_length:
dynamic_axes[output_name].update({j: 'seq_len'})
return dynamic_axes, output_names
def validate_onnx_model(onnx_model_path, example_inputs, example_outputs_flatten, use_gpu, fp16):
test_session = create_onnxruntime_session(onnx_model_path, use_gpu, enable_all_optimization=False)
if test_session is None:
logger.error(f"{onnx_model_path} is an invalid ONNX model")
return False
logger.info(f"{onnx_model_path} is a valid ONNX model")
# Compare the inference result with PyTorch or Tensorflow
example_ort_inputs = {k: t.cpu().numpy() for k, t in example_inputs.items()}
example_ort_outputs = test_session.run(None, example_ort_inputs)
if len(example_outputs_flatten) != len(example_ort_outputs):
logger.error(
f"Number of output tensors expected {len(example_outputs_flatten)}, got {len(example_ort_outputs)}")
return False
for i in range(len(example_outputs_flatten)):
abs_diff = numpy.amax(numpy.abs(example_ort_outputs[i] - example_outputs_flatten[i].cpu().numpy()))
if abs_diff > 1e-4:
logger.info(f"Max absolute diff={abs_diff} for output tensor {i}")
rtol = 5e-02 if fp16 else 1e-4
atol = 1e-01 if fp16 else 1e-4
if not numpy.allclose(example_ort_outputs[i], example_outputs_flatten[i].cpu(), rtol=rtol, atol=atol):
logger.error(f"Output tensor {i} is not close: rtol={rtol}, atol={atol}")
return False
logger.info(f"inference result of onnxruntime is validated on {onnx_model_path}")
return True
def get_onnx_file_path(onnx_dir: str, model_name: str, input_count: int, optimized_by_script: bool, use_gpu: bool,
precision: Precision, optimized_by_onnxruntime: bool, use_external_data: bool):
from re import sub
normalized_model_name = sub(r'[^a-zA-Z0-9_]', '_', model_name)
if not optimized_by_script:
filename = f"{normalized_model_name}_{input_count}"
else:
device = "gpu" if use_gpu else "cpu"
filename = f"{normalized_model_name}_{input_count}_{precision}_{device}"
if optimized_by_onnxruntime:
filename += f"_ort"
directory = onnx_dir
# ONNXRuntime will not write external data, so the raw and optimized models must be in the same directory.
if use_external_data and not optimized_by_onnxruntime:
directory = os.path.join(onnx_dir, filename)
if not os.path.exists(directory):
os.makedirs(directory)
return os.path.join(directory, f"{filename}.onnx")
def add_filename_suffix(file_path: str, suffix: str) -> str:
"""
Append a suffix to the filename (before the extension).
Args:
    file_path: Path of the file to which the suffix is added.
    suffix: The suffix to add.
Returns: Path with the suffix appended at the end of the filename, before the extension.
"""
path = Path(file_path)
return str(path.parent.joinpath(path.stem + suffix).with_suffix(path.suffix))
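# Example:
#
#     >>> add_filename_suffix("/tmp/models/bert_base_3.onnx", "_ort")
#     '/tmp/models/bert_base_3_ort.onnx'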
def optimize_onnx_model_by_ort(onnx_model_path, ort_model_path, use_gpu, overwrite, model_fusion_statistics):
if overwrite or not os.path.exists(ort_model_path):
Path(ort_model_path).parent.mkdir(parents=True, exist_ok=True)
from optimizer import optimize_by_onnxruntime, get_fusion_statistics
# Use onnxruntime to optimize model, which will be saved to *_ort.onnx
opt_model = optimize_by_onnxruntime(onnx_model_path,
use_gpu=use_gpu,
optimized_model_path=ort_model_path,
opt_level=99)
model_fusion_statistics[ort_model_path] = get_fusion_statistics(ort_model_path)
else:
logger.info(f"Skip optimization since model existed: {ort_model_path}")
def optimize_onnx_model(onnx_model_path, optimized_model_path, model_type, num_attention_heads, hidden_size, use_gpu,
precision, use_raw_attention_mask, overwrite, model_fusion_statistics,
use_external_data_format):
if overwrite or not os.path.exists(optimized_model_path):
Path(optimized_model_path).parent.mkdir(parents=True, exist_ok=True)
from optimizer import optimize_model
from onnx_model_bert import BertOptimizationOptions
optimization_options = BertOptimizationOptions(model_type)
optimization_options.use_raw_attention_mask(use_raw_attention_mask)
if Precision.FLOAT16 == precision:
optimization_options.enable_gelu_approximation = True
if Precision.INT8 == precision:
optimization_options.enable_embed_layer_norm = False
# Use script to optimize model.
# Use opt_level <= 1 for models to be converted to fp16, because some fused ops (like FusedGemm) have only fp32 and no fp16 implementations.
# It is better to be conservative, so we use opt_level=0 here, in case MemcpyFromHost is added to the graph by OnnxRuntime.
opt_model = optimize_model(onnx_model_path,
model_type,
num_heads=num_attention_heads,
hidden_size=hidden_size,
opt_level=0,
optimization_options=optimization_options,
use_gpu=use_gpu,
only_onnxruntime=False)
if model_type == 'bert_keras':
opt_model.use_dynamic_axes()
model_fusion_statistics[optimized_model_path] = opt_model.get_fused_operator_statistics()
if Precision.FLOAT16 == precision:
opt_model.convert_model_float32_to_float16()
opt_model.save_model_to_file(optimized_model_path, use_external_data_format)
else:
logger.info(f"Skip optimization since model existed: {optimized_model_path}")
def modelclass_dispatcher(model_name, custom_model_class):
if (custom_model_class != None):
if (custom_model_class in MODEL_CLASSES):
return custom_model_class
else:
raise Exception("Valid model class: " + ' '.join(MODEL_CLASSES))
if model_name in PRETRAINED_GPT2_MODELS:
return "GPT2ModelNoPastState"
import re
if (re.search('-squad$', model_name) != None):
return "AutoModelForQuestionAnswering"
elif (re.search('-mprc$', model_name) != None):
return "AutoModelForSequenceClassification"
elif (re.search('gpt2', model_name) != None):
return "AutoModelWithLMHead"
return "AutoModel"
def load_pretrained_model(model_name, config, cache_dir, custom_model_class, is_tf_model=False):
model_class_name = modelclass_dispatcher(model_name, custom_model_class)
if model_class_name == "GPT2ModelNoPastState":
if is_tf_model:
raise NotImplementedError("TFGPT2ModelNoPastState is currently not supported.")
else:
return GPT2ModelNoPastState.from_pretrained(model_name, config=config, cache_dir=cache_dir)
if is_tf_model:
model_class_name = 'TF' + model_class_name
transformers_module = __import__("transformers", fromlist=[model_class_name])
model_class = getattr(transformers_module, model_class_name)
use_cdn = False if model_name == 't5-11b' else True
return model_class.from_pretrained(model_name, config=config, cache_dir=cache_dir, use_cdn=use_cdn)
def load_pt_model(model_name, model_class, cache_dir):
config = AutoConfig.from_pretrained(model_name, cache_dir=cache_dir)
if hasattr(config, 'return_dict'):
config.return_dict = False
model = load_pretrained_model(model_name, config=config, cache_dir=cache_dir, custom_model_class=model_class)
return config, model
def load_tf_model(model_name, model_class, cache_dir):
config = AutoConfig.from_pretrained(model_name, cache_dir=cache_dir)
model = load_pretrained_model(model_name,
config=config,
cache_dir=cache_dir,
custom_model_class=model_class,
is_tf_model=True)
return config, model
# For test only
def load_pt_model_from_tf(model_name):
# Note that we could get a pt model from tf, but the model source and its structure in this case are different from directly using
# load_pt_model() and load_tf_model(), even with the same name. Therefore it should not be used for comparison with them.
from convert_tf_models_to_pytorch import tf2pt_pipeline
config, model = tf2pt_pipeline(model_name)
return config, model
def validate_and_optimize_onnx(model_name, use_external_data_format, model_type, onnx_dir, input_names, use_gpu,
precision, optimize_onnx, validate_onnx, use_raw_attention_mask, overwrite, config,
model_fusion_statistics, onnx_model_path, example_inputs, example_outputs_flatten):
is_valid_onnx_model = True
if validate_onnx:
is_valid_onnx_model = validate_onnx_model(onnx_model_path, example_inputs, example_outputs_flatten, use_gpu,
False)
if optimize_onnx or precision == Precision.FLOAT16 or precision == Precision.INT8: # Use script (optimizer.py) to optimize
optimized_model_path = get_onnx_file_path(onnx_dir, model_name, len(input_names), True, use_gpu, precision,
False, use_external_data_format)
optimize_onnx_model(onnx_model_path, optimized_model_path, model_type, config.num_attention_heads,
config.hidden_size, use_gpu, precision, use_raw_attention_mask, overwrite,
model_fusion_statistics, use_external_data_format)
onnx_model_path = optimized_model_path
if validate_onnx:
is_valid_onnx_model = validate_onnx_model(onnx_model_path, example_inputs, example_outputs_flatten, use_gpu,
precision == Precision.FLOAT16)
if precision == Precision.INT8:
logger.info(f"Quantizing model: {onnx_model_path}")
QuantizeHelper.quantize_onnx_model(onnx_model_path, onnx_model_path, use_external_data_format)
logger.info(f"Finished quantizing model: {onnx_model_path}")
else: # Use OnnxRuntime to optimize
if is_valid_onnx_model:
ort_model_path = add_filename_suffix(onnx_model_path, '_ort')
optimize_onnx_model_by_ort(onnx_model_path, ort_model_path, use_gpu, overwrite, model_fusion_statistics)
return onnx_model_path, is_valid_onnx_model, config.vocab_size
def export_onnx_model_from_pt(model_name, opset_version, use_external_data_format, model_type, model_class, cache_dir,
onnx_dir, input_names, use_gpu, precision, optimize_onnx, validate_onnx,
use_raw_attention_mask, overwrite, model_fusion_statistics):
config, model = load_pt_model(model_name, model_class, cache_dir)
# config, model = load_pt_model_from_tf(model_name)
model.cpu()
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
max_input_size = tokenizer.max_model_input_sizes[
model_name] if model_name in tokenizer.max_model_input_sizes else 1024
example_inputs = tokenizer.encode_plus("This is a sample input", return_tensors="pt")
example_inputs = filter_inputs(example_inputs, input_names)
example_outputs = model(**example_inputs)
assert isinstance(example_outputs, (list, tuple)), f"type of output is not list or tuple: {type(example_outputs)}"
# Flatten is needed for gpt2 and distilgpt2.
example_outputs_flatten = flatten(example_outputs)
example_outputs_flatten = update_flatten_list(example_outputs_flatten, [])
onnx_model_path = get_onnx_file_path(onnx_dir, model_name, len(input_names), False, use_gpu, precision, False,
use_external_data_format)
if overwrite or not os.path.exists(onnx_model_path):
logger.info("Exporting ONNX model to {}".format(onnx_model_path))
Path(onnx_model_path).parent.mkdir(parents=True, exist_ok=True)
dynamic_axes, output_names = build_dynamic_axes(example_inputs, example_outputs_flatten)
replace_torch_functions()
torch.onnx.export(model=model,
args=tuple(example_inputs.values()),
f=onnx_model_path,
input_names=list(example_inputs.keys()),
output_names=output_names,
example_outputs=example_outputs,
dynamic_axes=dynamic_axes,
do_constant_folding=True,
opset_version=opset_version,
use_external_data_format=use_external_data_format)
restore_torch_functions()
else:
logger.info(f"Skip export since model existed: {onnx_model_path}")
onnx_model_file, is_valid_onnx_model, vocab_size = validate_and_optimize_onnx(
model_name, use_external_data_format, model_type, onnx_dir, input_names, use_gpu, precision, optimize_onnx,
validate_onnx, use_raw_attention_mask, overwrite, config, model_fusion_statistics, onnx_model_path,
example_inputs, example_outputs_flatten)
return onnx_model_file, is_valid_onnx_model, vocab_size, max_input_size
def export_onnx_model_from_tf(model_name, opset_version, use_external_data_format, model_type, model_class, cache_dir,
onnx_dir, input_names, use_gpu, precision, optimize_onnx, validate_onnx,
use_raw_attention_mask, overwrite, model_fusion_statistics):
# Use CPU to export
import tensorflow as tf
tf.config.set_visible_devices([], 'GPU')
config, model = load_tf_model(model_name, model_class, cache_dir)
model._saved_model_inputs_spec = None
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
max_input_size = tokenizer.max_model_input_sizes[
model_name] if model_name in tokenizer.max_model_input_sizes else 1024
example_inputs = tokenizer.encode_plus("This is a sample input",
return_tensors="tf",
max_length=max_input_size,
pad_to_max_length=True,
truncation=True)
example_inputs = filter_inputs(example_inputs, input_names)
example_outputs = model(example_inputs, training=False)
# Flatten is needed for gpt2 and distilgpt2.
example_outputs_flatten = flatten(example_outputs)
example_outputs_flatten = update_flatten_list(example_outputs_flatten, [])
onnx_model_path = get_onnx_file_path(onnx_dir, model_name, len(input_names), False, use_gpu, precision, False,
use_external_data_format)
if overwrite or not os.path.exists(onnx_model_path):
logger.info("Exporting ONNX model to {}".format(onnx_model_path))
Path(onnx_model_path).parent.mkdir(parents=True, exist_ok=True)
import keras2onnx
onnx_model = keras2onnx.convert_keras(model, model.name, target_opset=opset_version)
keras2onnx.save_model(onnx_model, onnx_model_path)
else:
logger.info(f"Skip export since model existed: {onnx_model_path}")
model_type = model_type + '_keras'
onnx_model_file, is_valid_onnx_model, vocab_size = validate_and_optimize_onnx(
model_name, use_external_data_format, model_type, onnx_dir, input_names, use_gpu, precision, optimize_onnx,
validate_onnx, use_raw_attention_mask, overwrite, config, model_fusion_statistics, onnx_model_path,
example_inputs, example_outputs_flatten)
return onnx_model_file, is_valid_onnx_model, vocab_size, max_input_size
|
py | 1a36e10edded4abb50b13cdd020c97e8ac3c03ff | from typing import Optional, Union
from fasta_reader import FASTAReader
from hmmer_reader import HMMERParser
from nmm import BaseAlphabet, DNAAlphabet, IUPACAminoAlphabet, RNAAlphabet
Alphabets = Union[DNAAlphabet, RNAAlphabet, IUPACAminoAlphabet]
__all__ = [
"Alphabets",
"alphabet_name",
"infer_alphabet",
"infer_fasta_alphabet",
"infer_hmmer_alphabet",
]
def alphabet_name(alphabet: Alphabets) -> str:
if isinstance(alphabet, IUPACAminoAlphabet):
return "amino"
if isinstance(alphabet, DNAAlphabet):
return "dna"
if isinstance(alphabet, RNAAlphabet):
return "rna"
# TODO: this is temporary
if isinstance(alphabet, BaseAlphabet):
if set(alphabet.symbols) == set(b"ACGT"):
return "dna"
if set(alphabet.symbols) == set(b"ACGU"):
return "rna"
raise ValueError("Unknown alphabet.")
def infer_alphabet(sequence: bytes) -> Optional[Alphabets]:
"""
Infer alphabet from a sequence of symbols.
Parameters
----------
sequence
Sequence of symbols.
"""
dna = DNAAlphabet()
rna = RNAAlphabet()
amino = IUPACAminoAlphabet()
abc = set(sequence)
if len(abc - set(dna.symbols)) == 0:
return dna
if len(abc - set(rna.symbols)) == 0:
return rna
if len(abc - set(amino.symbols)) == 0:
return amino
return None
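# Example (illustrative; return values are instances of the classes above):
#
#     >>> infer_alphabet(b"ACGTGTT")     # only DNA symbols   -> DNAAlphabet
#     >>> infer_alphabet(b"ACGUUGA")     # contains U         -> RNAAlphabet
#     >>> infer_alphabet(b"MKTAYIAK")    # amino-acid symbols -> IUPACAminoAlphabet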
def infer_fasta_alphabet(parser: FASTAReader) -> Optional[Alphabets]:
"""
Infer alphabet from fasta file.
Parameters
----------
parser
FASTA parser.
"""
for item in parser:
alphabet = infer_alphabet(item.sequence.encode())
if alphabet is not None:
return alphabet
return None
def infer_hmmer_alphabet(parser: HMMERParser) -> Optional[Alphabets]:
for prof in parser:
alph = dict(prof.metadata)["ALPH"].lower()
if alph == "amino":
return IUPACAminoAlphabet()
if alph == "dna":
return DNAAlphabet()
if alph == "rna":
return RNAAlphabet()
return None
|
py | 1a36e148040afad5242eb359316075c31b084044 | from tello import Tello
import sys
from datetime import datetime
import time
import TelloPro
tello = Tello()
command_lst = []
command_lst.append(TelloPro.get_instance('takeoff',-1))
command_lst.append(TelloPro.get_instance('up',50))
command_lst.append(TelloPro.get_instance('down',50))
command_lst.append(TelloPro.get_instance('up',50))
command_lst.append(TelloPro.get_instance('left',50))
command_lst.append(TelloPro.get_instance('right',50))
command_lst.append(TelloPro.get_instance('forward',50))
command_lst.append(TelloPro.get_instance('back',50))
command_lst.append(TelloPro.get_instance('cw',90))
command_lst.append(TelloPro.get_instance('ccw',90))
command_lst.append(TelloPro.get_instance('land',-1))
for command in command_lst:
tello.send_command_instance(command)
|
py | 1a36e1b7ac0770c5d76244c8d6550bf6e99f816b | # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
`stackdriver_service_monitoring.py`
Stackdriver Service Monitoring exporter class.
"""
import difflib
import json
import logging
import os
import google.api_core.exceptions
from google.cloud.monitoring_v3 import ServiceMonitoringServiceClient
from google.protobuf.json_format import MessageToJson
from slo_generator.backends.stackdriver import StackdriverBackend
from slo_generator.constants import NO_DATA
from slo_generator.utils import dict_snake_to_caml
LOGGER = logging.getLogger(__name__)
SID_GAE = 'gae:{project_id}_{module_id}'
SID_CLOUD_ENDPOINT = 'ist:{project_id}-{service}'
SID_CLUSTER_ISTIO = (
'ist:{project_id}-zone-{location}-{cluster_name}-{service_namespace}-'
'{service_name}')
SID_MESH_ISTIO = ('ist:{mesh_uid}-{service_namespace}-{service_name}')
class StackdriverServiceMonitoringBackend:
"""Stackdriver Service Monitoring backend class.
Args:
project_id (str): Stackdriver host project id.
client (google.cloud.monitoring_v3.ServiceMonitoringServiceClient):
Existing Service Monitoring API client. Initialize a new client if
omitted.
"""
def __init__(self, project_id, client=None):
self.project_id = project_id
self.client = client
if client is None:
self.client = ServiceMonitoringServiceClient()
self.parent = self.client.project_path(project_id)
self.workspace_path = f'workspaces/{project_id}'
self.project_path = f'projects/{project_id}'
def good_bad_ratio(self, timestamp, window, slo_config):
"""Good bad ratio method.
Args:
timestamp (int): UNIX timestamp.
window (int): Window in seconds.
slo_config (dict): SLO configuration.
Returns:
dict: SLO config.
"""
return self.retrieve_slo(timestamp, window, slo_config)
def distribution_cut(self, timestamp, window, slo_config):
"""Distribution cut method.
Args:
timestamp (int): UNIX timestamp.
window (int): Window in seconds.
slo_config (dict): SLO configuration.
Returns:
dict: SLO config.
"""
return self.retrieve_slo(timestamp, window, slo_config)
def basic(self, timestamp, window, slo_config):
"""Basic method (automatic SLOs for GAE / GKE (Istio) and Cloud
Endpoints).
Args:
timestamp (int): UNIX timestamp.
window (int): Window in seconds.
slo_config (dict): SLO configuration.
Returns:
dict: SLO config.
"""
return self.retrieve_slo(timestamp, window, slo_config)
def window(self, timestamp, window, slo_config):
"""Window-based SLI method.
Args:
timestamp (int): UNIX timestamp.
window (int): Window in seconds.
slo_config (dict): SLO configuration.
Returns:
dict: SLO config.
"""
return self.retrieve_slo(timestamp, window, slo_config)
# pylint: disable=unused-argument
def delete(self, timestamp, window, slo_config):
"""Delete method.
Args:
timestamp (int): UNIX timestamp.
window (int): Window in seconds.
slo_config (dict): SLO configuration.
Returns:
dict: SLO config.
"""
return self.delete_slo(window, slo_config)
def retrieve_slo(self, timestamp, window, slo_config):
"""Get SLI value from Stackdriver Monitoring API.
Args:
timestamp (int): UNIX timestamp.
window (int): Window in seconds.
slo_config (dict): SLO configuration.
Returns:
tuple: A tuple (good_event_count, bad_event_count).
"""
# Get or create service
service = self.get_service(slo_config)
if service is None:
service = self.create_service(slo_config)
LOGGER.debug(service)
# Get or create SLO
slo = self.get_slo(window, slo_config)
if not slo:
slo = self.create_slo(window, slo_config)
LOGGER.debug(service)
# Now that we have our SLO, retrieve the TimeSeries from Stackdriver
# Monitoring API for that particular SLO id.
metric_filter = SSM.build_slo_id(window, slo_config, full=True)
filter = f"select_slo_counts(\"{metric_filter}\")"
# Query SLO timeseries
stackdriver = StackdriverBackend(self.project_id)
timeseries = stackdriver.query(timestamp,
window,
filter,
aligner='ALIGN_SUM',
reducer='REDUCE_SUM',
group_by=['metric.labels.event_type'])
timeseries = list(timeseries)
good_event_count, bad_event_count = SSM.count(timeseries)
return (good_event_count, bad_event_count)
@staticmethod
def count(timeseries):
"""Extract good_count, bad_count tuple from Stackdriver Monitoring API
response.
Args:
timeseries (list): List of timeseries objects.
Returns:
tuple: A tuple (good_event_count, bad_event_count).
"""
good_event_count, bad_event_count = NO_DATA, NO_DATA
for timeserie in timeseries:
event_type = timeserie.metric.labels['event_type']
value = timeserie.points[0].value.double_value
if event_type == 'bad':
bad_event_count = value
elif event_type == 'good':
good_event_count = value
return good_event_count, bad_event_count
def create_service(self, slo_config):
"""Create Service object in Stackdriver Service Monitoring API.
Args:
slo_config (dict): SLO configuration.
Returns:
dict: Stackdriver Service Monitoring API response.
"""
LOGGER.debug("Creating service ...")
service_json = SSM.build_service(slo_config)
service_id = SSM.build_service_id(slo_config)
service = self.client.create_service(self.project_path,
service_json,
service_id=service_id)
LOGGER.info(
f'Service "{service_id}" created successfully in Stackdriver '
f'Service Monitoring API.')
return SSM.to_json(service)
def get_service(self, slo_config):
"""Get Service object from Stackdriver Service Monitoring API.
Args:
slo_config (dict): SLO configuration.
Returns:
dict: Service config.
"""
# Look for API services in workspace matching our config.
service_id = SSM.build_service_id(slo_config)
services = list(self.client.list_services(self.workspace_path))
matches = [
service for service in services
if service.name.split("/")[-1] == service_id
]
# If no match is found for our service name in the API, raise an
# exception if the service should have been auto-added (method 'basic'),
# else output a warning message.
if not matches:
msg = (f'Service "{service_id}" does not exist in '
f'workspace "{self.project_id}"')
method = slo_config['backend']['method']
if method == 'basic':
sids = [service.name.split("/")[-1] for service in services]
LOGGER.debug(
f'List of services in workspace {self.project_id}: {sids}')
LOGGER.error(msg)
raise Exception(msg)
LOGGER.error(msg)
return None
# Match found in API, return it.
service = matches[0]
LOGGER.debug(f'Found matching service "{service.name}"')
return SSM.to_json(service)
@staticmethod
def build_service(slo_config):
"""Build service JSON in Stackdriver Monitoring API from SLO
configuration.
Args:
slo_config (dict): SLO configuration.
Returns:
dict: Service JSON in Stackdriver Monitoring API.
"""
service_id = SSM.build_service_id(slo_config)
display_name = slo_config.get('service_display_name', service_id)
service = {'display_name': display_name, 'custom': {}}
return service
@staticmethod
def build_service_id(slo_config, dest_project_id=None, full=False):
"""Build service id from SLO configuration.
Args:
slo_config (dict): SLO configuration.
dest_project_id (str, optional): Project id for service if different
than the workspace project id.
full (bool): If True, return full service resource id including
project path.
Returns:
str: Service id.
"""
service_name = slo_config['service_name']
feature_name = slo_config['feature_name']
backend = slo_config['backend']
project_id = backend['project_id']
measurement = backend['measurement']
app_engine = measurement.get('app_engine')
cluster_istio = measurement.get('cluster_istio')
mesh_istio = measurement.get('mesh_istio')
cloud_endpoints = measurement.get('cloud_endpoints')
# Use auto-generated ids for 'custom' SLOs, use system-generated ids
# for all other types of SLOs.
if app_engine:
service_id = SID_GAE.format_map(app_engine)
dest_project_id = app_engine['project_id']
elif cluster_istio:
service_id = SID_CLUSTER_ISTIO.format_map(cluster_istio)
dest_project_id = cluster_istio['project_id']
elif mesh_istio:
service_id = SID_MESH_ISTIO.format_map(mesh_istio)
elif cloud_endpoints:
service_id = SID_CLOUD_ENDPOINT.format_map(cloud_endpoints)
dest_project_id = cloud_endpoints['project_id']
else:
service_id = f'{service_name}-{feature_name}'
if full:
if dest_project_id:
return f'projects/{dest_project_id}/services/{service_id}'
return f'projects/{project_id}/services/{service_id}'
return service_id
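# Example (illustrative values): for a backend measurement containing
# cluster_istio = {'project_id': 'my-project', 'location': 'us-central1-a',
# 'cluster_name': 'prod', 'service_namespace': 'default', 'service_name': 'frontend'},
# SID_CLUSTER_ISTIO yields the service id
# 'ist:my-project-zone-us-central1-a-prod-default-frontend', and with full=True
# it is prefixed with 'projects/my-project/services/'.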
def create_slo(self, window, slo_config):
"""Create SLO object in Stackdriver Service Monitoring API.
Args:
window (int): Window (in seconds).
slo_config (dict): SLO config.
Returns:
dict: Service Management API response.
"""
slo_json = SSM.build_slo(window, slo_config)
slo_id = SSM.build_slo_id(window, slo_config)
parent = SSM.build_service_id(slo_config, full=True)
slo = self.client.create_service_level_objective(
parent, slo_json, service_level_objective_id=slo_id)
return SSM.to_json(slo)
@staticmethod
def build_slo(window, slo_config): # pylint: disable=R0912,R0915
"""Get SLO JSON representation in Service Monitoring API from SLO
configuration.
Args:
window (int): Window (in seconds).
slo_config (dict): SLO Configuration.
Returns:
dict: SLO JSON configuration.
"""
measurement = slo_config['backend'].get('measurement', {})
method = slo_config['backend']['method']
description = slo_config['slo_description']
target = slo_config['slo_target']
minutes, _ = divmod(window, 60)
hours, _ = divmod(minutes, 60)
display_name = f'{description} ({hours}h)'
slo = {
'display_name': display_name,
'goal': target,
'rolling_period': {
'seconds': window
}
}
filter_valid = measurement.get('filter_valid', "")
if method == 'basic':
methods = measurement.get('method', [])
locations = measurement.get('location', [])
versions = measurement.get('version', [])
threshold = measurement.get('latency', {}).get('threshold')
slo['service_level_indicator'] = {'basic_sli': {}}
basic_sli = slo['service_level_indicator']['basic_sli']
if methods:
basic_sli['method'] = methods
if locations:
basic_sli['location'] = locations
if versions:
basic_sli['version'] = versions
if threshold:
basic_sli['latency'] = {
'threshold': {
'seconds': 0,
'nanos': int(threshold) * 10**6
}
}
else:
basic_sli['availability'] = {}
elif method == 'good_bad_ratio':
filter_good = measurement.get('filter_good', "")
filter_bad = measurement.get('filter_bad', "")
slo['service_level_indicator'] = {
'request_based': {
'good_total_ratio': {}
}
}
sli = slo['service_level_indicator']
ratio = sli['request_based']['good_total_ratio']
if filter_good:
ratio['good_service_filter'] = filter_good
if filter_bad:
ratio['bad_service_filter'] = filter_bad
if filter_valid:
ratio['total_service_filter'] = filter_valid
elif method == 'distribution_cut':
range_min = measurement.get('range_min', 0)
range_max = measurement['range_max']
slo['service_level_indicator'] = {
'request_based': {
'distribution_cut': {
'distribution_filter': filter_valid,
'range': {
'max': float(range_max)
}
}
}
}
sli = slo['service_level_indicator']['request_based']
if range_min != 0:
sli['distribution_cut']['range']['min'] = float(range_min)
elif method == 'windows':
filter = measurement.get('filter')
# threshold = conf.get('threshold')
# mean_in_range = conf.get('filter')
# sum_in_range = conf.get('filter')
slo['service_level_indicator'] = {
'windows_based': {
'window_period': window,
'good_bad_metric_filter': filter,
# 'good_total_ratio_threshold': {
# object (PerformanceThreshold)
# },
# 'metricMeanInRange': {
# object (MetricRange)
# },
# 'metricSumInRange': {
# object (MetricRange)
# }
}
}
else:
raise Exception(f'Method "{method}" is not supported.')
return slo
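# Example (illustrative): for method 'good_bad_ratio', a 3600 s window, a 0.99
# target and good/bad filters, the returned dict has the shape:
#
#     {'display_name': '<slo_description> (1h)',
#      'goal': 0.99,
#      'rolling_period': {'seconds': 3600},
#      'service_level_indicator': {'request_based': {'good_total_ratio': {
#          'good_service_filter': '<filter_good>',
#          'bad_service_filter': '<filter_bad>'}}}}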
def get_slo(self, window, slo_config):
"""Get SLO object from Stackriver Service Monitoring API.
Args:
service_id (str): Service identifier.
window (int): Window in seconds.
slo_config (dict): SLO config.
Returns:
dict: API response.
"""
service_path = SSM.build_service_id(slo_config, full=True)
LOGGER.debug(f'Getting SLO for for "{service_path}" ...')
slos = self.list_slos(service_path)
slo_local_id = SSM.build_slo_id(window, slo_config)
slo_json = SSM.build_slo(window, slo_config)
slo_json = SSM.convert_slo_to_ssm_format(slo_json)
# Loop through API response to find an existing SLO that corresponds to
# our configuration.
for slo in slos:
slo_remote_id = slo['name'].split("/")[-1]
equal = slo_remote_id == slo_local_id
if equal:
LOGGER.debug(f'Found existing SLO "{slo_remote_id}".')
LOGGER.debug(f'SLO object: {slo}')
strict_equal = SSM.compare_slo(slo_json, slo)
if strict_equal:
return slo
return self.update_slo(window, slo_config)
LOGGER.warning('No SLO found matching configuration.')
LOGGER.debug(f'SLOs from Stackdriver Monitoring API: {slos}')
LOGGER.debug(f'SLO config converted: {slo_json}')
return None
def update_slo(self, window, slo_config):
"""Update an existing SLO.
Args:
window (int): Window (in seconds)
slo_config (dict): SLO configuration.
Returns:
dict: API response.
"""
slo_json = SSM.build_slo(window, slo_config)
slo_id = SSM.build_slo_id(window, slo_config, full=True)
LOGGER.warning(f"Updating SLO {slo_id} ...")
slo_json['name'] = slo_id
return SSM.to_json(
self.client.update_service_level_objective(slo_json))
def list_slos(self, service_path):
"""List all SLOs from Stackdriver Service Monitoring API.
Args:
service_path (str): Service path in the form
'projects/{project_id}/services/{service_id}'.
Returns:
dict: API response.
"""
slos = self.client.list_service_level_objectives(service_path)
slos = list(slos)
LOGGER.debug(f"{len(slos)} SLOs found in Service Monitoring API.")
# LOGGER.debug(slos)
return [SSM.to_json(slo) for slo in slos]
def delete_slo(self, window, slo_config):
"""Delete SLO from Stackdriver Monitoring API.
Args:
window (int): Window (in seconds).
slo_config: SLO configuration.
Returns:
dict: API response.
"""
slo_path = SSM.build_slo_id(window, slo_config, full=True)
LOGGER.info(f'Deleting SLO "{slo_path}"')
try:
return self.client.delete_service_level_objective(slo_path)
except google.api_core.exceptions.NotFound:
LOGGER.warning(
f'SLO "{slo_path}" does not exist in Service Monitoring API. '
f'Skipping.')
return None
@staticmethod
def build_slo_id(window, slo_config, full=False):
"""Build SLO id from SLO configuration.
Args:
    window (int): Window (in seconds).
    slo_config (dict): SLO configuration.
    full (bool): If True, return full resource id including project.
Returns:
str: SLO id.
"""
if 'slo_id' in slo_config:
slo_id_part = slo_config['slo_id']
slo_id = f'{slo_id_part}-{window}'
else:
slo_name = slo_config['slo_name']
slo_id = f'{slo_name}-{window}'
if full:
service_path = SSM.build_service_id(slo_config, full=True)
return f'{service_path}/serviceLevelObjectives/{slo_id}'
return slo_id
@staticmethod
def compare_slo(slo1, slo2):
"""Compares 2 SLO configurations to see if they correspond to the same
SLO.
An SLO is deemed the same if the whole configuration is similar, except
for the `goal` field that should be adjustable.
Args:
slo1 (dict): Service Monitoring API SLO configuration to compare.
slo2 (dict): Service Monitoring API SLO configuration to compare.
Returns:
bool: True if the SLOs match, False otherwise.
"""
exclude_keys = ["name"]
slo1_copy = {k: v for k, v in slo1.items() if k not in exclude_keys}
slo2_copy = {k: v for k, v in slo2.items() if k not in exclude_keys}
local_json = json.dumps(slo1_copy, sort_keys=True)
remote_json = json.dumps(slo2_copy, sort_keys=True)
if os.environ.get('DEBUG') == '2':
LOGGER.info("----------")
LOGGER.info(local_json)
LOGGER.info("----------")
LOGGER.info(remote_json)
LOGGER.info("----------")
LOGGER.info(SSM.string_diff(local_json, remote_json))
return local_json == remote_json
@staticmethod
def string_diff(string1, string2):
"""Diff 2 strings. Used to print comparison of JSONs for debugging.
Args:
string1 (str): String 1.
string2 (str): String 2.
Returns:
list: List of messages pointing out differences.
"""
lines = []
for idx, string in enumerate(difflib.ndiff(string1, string2)):
if string[0] == ' ':
continue
if string[0] == '-':
info = u'Delete "{}" from position {}'.format(string[-1], idx)
lines.append(info)
elif string[0] == '+':
info = u'Add "{}" to position {}'.format(string[-1], idx)
lines.append(info)
return lines
@staticmethod
def convert_slo_to_ssm_format(slo):
"""Convert SLO JSON to Service Monitoring API format.
Address edge cases, like `duration` object computation.
Args:
slo (dict): SLO JSON object to be converted to Stackdriver Service
Monitoring API format.
Returns:
dict: SLO configuration in Service Monitoring API format.
"""
# Our local JSON is in snake case, convert it to Caml case.
data = dict_snake_to_caml(slo)
# The `rollingPeriod` field is in Duration format, convert it.
try:
period = data['rollingPeriod']
data['rollingPeriod'] = SSM.convert_duration_to_string(period)
except KeyError:
pass
# The `latency` field is in Duration format, convert it.
try:
latency = data['serviceLevelIndicator']['basicSli']['latency']
threshold = latency['threshold']
latency['threshold'] = SSM.convert_duration_to_string(threshold)
except KeyError:
pass
return data
@staticmethod
def convert_duration_to_string(duration):
"""Convert a duration object to a duration string (in seconds).
Args:
duration (dict): Duration dictionary.
Returns:
str: Duration string.
"""
duration_seconds = 0.000
if 'seconds' in duration:
duration_seconds += duration['seconds']
if 'nanos' in duration:
duration_seconds += duration['nanos'] * 10**(-9)
if duration_seconds.is_integer():
duration_str = int(duration_seconds)
else:
duration_str = "{:0.3f}".format(duration_seconds)
return str(duration_str) + 's'
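# Examples (following the logic above):
#
#     >>> SSM.convert_duration_to_string({'seconds': 3600})
#     '3600s'
#     >>> SSM.convert_duration_to_string({'seconds': 0, 'nanos': 500000000})
#     '0.500s'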
@staticmethod
def to_json(response):
"""Convert a Stackdriver Service Monitoring API response to JSON
format.
Args:
response (obj): Response object.
Returns:
dict: Response object serialized as JSON.
"""
return json.loads(MessageToJson(response))
SSM = StackdriverServiceMonitoringBackend
|
py | 1a36e1e393bfd7ddf7f0135cca6331913ff9d219 | #!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate JSON data for LB interop test scenarios."""
import json
import os
import yaml
all_scenarios = []
# TODO(https://github.com/grpc/grpc-go/issues/2347): enable
# client_falls_back_because_no_backends_* scenarios for Java/Go.
# TODO(https://github.com/grpc/grpc-java/issues/4887): enable
# *short_stream* scenarios for Java.
# TODO(https://github.com/grpc/grpc-java/issues/4912): enable
# Java TLS tests involving TLS to the balancer.
def server_sec(transport_sec):
if transport_sec == 'google_default_credentials':
return 'alts', 'alts', 'tls'
return transport_sec, transport_sec, transport_sec
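# Example: server_sec('google_default_credentials') returns ('alts', 'alts', 'tls'),
# i.e. ALTS for the balancer and backends and TLS for the fallback; any other
# transport security is used unchanged for all three server types.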
def generate_no_balancer_because_lb_a_record_returns_nx_domain():
all_configs = []
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
config = {
'name':
'no_balancer_because_lb_a_record_returns_nx_domain_%s' %
transport_sec,
'skip_langs': [],
'transport_sec':
transport_sec,
'balancer_configs': [],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_no_balancer_because_lb_a_record_returns_nx_domain()
def generate_no_balancer_because_lb_a_record_returns_no_data():
all_configs = []
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
config = {
'name':
'no_balancer_because_lb_a_record_returns_no_data_%s' %
transport_sec,
'skip_langs': [],
'transport_sec':
transport_sec,
'balancer_configs': [],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
True,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_no_balancer_because_lb_a_record_returns_no_data()
def generate_client_referred_to_backend():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_%s_short_stream_%s' %
(transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend()
def generate_client_referred_to_backend_fallback_broken():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in ['alts', 'tls', 'google_default_credentials']:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_fallback_broken_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}],
'fallback_configs': [{
'transport_sec': 'insecure',
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_fallback_broken()
def generate_client_referred_to_backend_multiple_backends():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_multiple_backends_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_multiple_backends()
def generate_client_falls_back_because_no_backends():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = ['go', 'java']
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_falls_back_because_no_backends_%s_short_stream_%s' %
(transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_falls_back_because_no_backends()
def generate_client_falls_back_because_balancer_connection_broken():
all_configs = []
for transport_sec in ['alts', 'tls', 'google_default_credentials']:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs = ['java']
config = {
'name':
'client_falls_back_because_balancer_connection_broken_%s' %
transport_sec,
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': 'insecure',
'short_stream': False,
}],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_falls_back_because_balancer_connection_broken()
def generate_client_referred_to_backend_multiple_balancers():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_multiple_balancers_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
],
'backend_configs': [{
'transport_sec': backend_sec,
},],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_multiple_balancers()
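# The generators above only build Python dicts; the script's sole output is the YAML document
# printed below, whose top-level key 'lb_interop_test_scenarios' lists every appended config.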
print(yaml.dump({
'lb_interop_test_scenarios': all_scenarios,
}))
|
py | 1a36e2249c38bae89c40d47ce4bea69db03e1236 | def odd_or_even(arr):
return "even" if sum(arr) % 2 == 0 else "odd" |
py | 1a36e382b00ebbdecc9652860a88ba8962a5aee6 | from bs4 import BeautifulSoup
import requests, TTS, time, sys
from TTS import *
# test game url hawks v bucks https://www.espn.com/nba/playbyplay/_/gameId/401337344
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}
def get_play_table(url):
game = requests.get(url, headers=headers)
soup = BeautifulSoup(game.text, 'html.parser')
soup = soup.findAll("div", class_="accordion-content collapse in")[1]
return soup.findAll('tr')[1:]
def get_team_names(url):
game = requests.get(url, headers=headers)
soup = BeautifulSoup(game.text, 'html.parser')
soup = soup.find("title")
title_array = soup.text.split(" ")
team1_name = ""
team1_abbr = ""
team2_name = ""
team2_abbr = ""
with open("Teams.txt") as file:
for line in file:
if title_array[0] in line:
team1_name = title_array[0]
team1_abbr = line.split(" ")[0].lower()
elif title_array[2] in line:
team2_name = title_array[2]
team2_abbr = line.split(" ")[0].lower()
return (team1_name, team1_abbr, team2_name, team2_abbr)
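# Assumption behind the parsing above: the ESPN page <title> reads roughly "Hawks vs. Bucks - ...",
# so title_array[0] and title_array[2] are the two team names, and each line of Teams.txt starts
# with the team's abbreviation followed by its name.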
def get_team_name(name_list, team_logo_string):
# print(team_logo_string)
if name_list[1] in team_logo_string:
return name_list[0]
elif name_list[3] in team_logo_string:
return name_list[2]
else:
print("Team name error")
def compare_times(last_time, maybe_new_time):
# return -1 if after last time, 0 if same as last time, 1 if before (less time left on the clock), -2 if a new quarter started
last_split = last_time.split(":")
new_split = maybe_new_time.split(":")
print(last_split, new_split)
last_seconds_total = 0
new_seconds_total = 0
if len(last_split) == 1:
last_seconds_total = float(last_split[0])
elif len(last_split) == 2:
last_seconds_total = float(last_split[0])*60 + float(last_split[1])
else:
print("Last split error: ", last_split)
if len(new_split) == 1:
new_seconds_total = float(new_split[0])
elif len(new_split) == 2:
new_seconds_total = float(new_split[0])*60 + float(new_split[1])
else:
print("New split error: ", new_split)
if len(last_split) < len(new_split):
# this is a new quarter
return -2
else: #same quarter
if last_seconds_total > new_seconds_total:
return 1
elif last_seconds_total < new_seconds_total:
return -1
else:
return 0
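# Worked examples of the contract in the comment above (illustrative only, not executed):
#   compare_times("1:30", "45.0") -> 1    (new timestamp has less time on the clock than the last one)
#   compare_times("1:30", "2:00") -> -1   (new timestamp has more time on the clock)
#   compare_times("1:30", "1:30") -> 0    (same clock reading)
#   compare_times("45.0", "1:30") -> -2   (clock regained a minutes field, i.e. a new quarter started)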
def get_zero_time(score_table): #this breaks if there is no zero time
play = score_table[0]
time = play.find('td', class_='time-stamp').text
return str(time)
def get_min_plays(score_table):
length = len(score_table)
if length < 15:
return length
return 15
def start_and_read(last_time, score_table, name_list):
print("A play was made at a new time.")
for i in range(get_min_plays(score_table)-1, -1, -1):
play = score_table[i]
time = play.find('td', class_='time-stamp').text
desc = play.find('td', class_='game-details').text
score = play.find('td', class_='combined-score').text
team_logo = play.find('img')['src']
print(" Zero Time: {} Read Time: {} ".format(last_time, time))
comparison = compare_times(last_time, time)
if comparison == 1:
# print(time, desc, score)
read("{} at {}.".format(get_team_name(name_list, team_logo), time))
# read(time)
if "three" in desc and "makes" in desc:
playBang()
elif "dunk" in desc and "makes" in desc:
playDunk()
elif "makes free throw" in desc:
playFreethrow()
read(desc)
if ("makes" in desc):
read(score)
elif comparison == -2:
return -1
return 0
def new_quarter(url, name_list):
last_time = "13:00"
last_count = 0
while True:
score_table = get_play_table(url)
zero_time = get_zero_time(score_table)
print("Last time: ", last_time, " Last count: ", last_count, " Zero time: ", zero_time)
if last_time != zero_time:
if start_and_read(last_time, score_table, name_list) == -1:
break
last_time = zero_time # update lasttime
# elif count_recent_score(score_table, last_time) > last_count:
# last_count = add_and_read(last_time, last_count, score_table)
else:
time.sleep(10)
def main():
if len(sys.argv) != 3:
print("Usage: run.py <espn play by play url> <quarter #>")
else:
init_tts()
url = sys.argv[1]
quarter_num = int(sys.argv[2])
name_list = get_team_names(url)
for i in range(quarter_num, 5):
new_quarter(url, name_list)
stop_tts()
if __name__ == '__main__':
main()
# get_score_table() |
py | 1a36e3a39ccb1e8d7831d7b7e03f13b963d8d686 | """
Split dacSlopes XML file into individual text files with slope/offset models for each threshold
Usage:
python dacSlopesXML2TXT.py [-d delim] <input_xml_file> <output_basename>
where:
<input_xml_file> = input dacSlopes GLAST Cal offline calibration file
<output_basename> = each output file will be based on this name with additional extension.
-d delim = optional field delimiter override (default = ' ')
Outputs:
For LAC,FLE,FHE:
output format is one line per crystal face:
twr lyr col face dac_slope(mev/adc) dac_offset(mev) dac_range(0,1)
For ULD:
output format is one line per adc channel:
twr lyr col face range uld_slope(mev/adc) uld_offset(mev) uld_dac_range(0,1)
"""
__facility__ = "Offline"
__abstract__ = "Split dacSlopes XML file into individual TXT files"
__author__ = "Z.Fewtrell"
__date__ = "$Date: 2008/07/06 22:43:14 $"
__version__ = "$Revision: 1.1 $, $Author: fewtrell $"
__release__ = "$Name: $"
__credits__ = "NRL code 7650"
if __name__ == '__main__':
# check commandline
delim = ' '
import getopt
import sys
try:
(opts,args) = getopt.getopt(sys.argv[1:], "d:")
except getopt.GetoptError:
print __doc__
sys.exit(1)
# opts has 2 parts, options (-abc ...) & remaining default params
for o, a in opts:
if o == '-d':
delim = a
if len(args) != 2:
print "no input file specified: ", __doc__
sys.exit(1)
# retrieve commandline parms
(inName, outputBasename) = args
# open and read XML dacSlopes file
import calCalibXML
xmlFile = calCalibXML.calDacSlopesCalibXML(inName)
(dacData, uldData, rngData) = xmlFile.read()
towers = xmlFile.getTowers()
xmlFile.close()
# open output files
lacFilename = outputBasename + '.lac_slope.txt'
fleFilename = outputBasename + '.fle_slope.txt'
fheFilename = outputBasename + '.fhe_slope.txt'
uldFilename = outputBasename + '.uld_slope.txt'
lacFile = open(lacFilename, 'w')
fleFile = open(fleFilename, 'w')
fheFile = open(fheFilename, 'w')
uldFile = open(uldFilename, 'w')
# print out headers as comment
lacFile.write(";twr lyr col face lac_slope lac_offest lac_rng\n")
fleFile.write(";twr lyr col face fle_slope fle_offest fle_rng\n")
fheFile.write(";twr lyr col face fhe_slope fhe_offest fhe_rng\n")
uldFile.write(";twr lyr col face uld_slope uld_offest uld_rng\n")
# print out txt file.
import calConstant
from calCalibXML import *
for twr in towers:
for lyr in range(calConstant.NUM_LAYER):
# calCalibXML uses 'row' indexing, not layer
row = layerToRow(lyr)
for col in range(calConstant.NUM_FE):
for face in range(calConstant.NUM_END):
online_face = calConstant.offline_face_to_online[face]
lacFile.write(delim.join([
str(x) for x in twr, lyr, col, face,\
dacData[twr][row][online_face][col][calDacSlopesCalibXML.DACDATA_LACDAC_SLOPE],\
dacData[twr][row][online_face][col][calDacSlopesCalibXML.DACDATA_LACDAC_OFFSET],\
rngData[twr][row][online_face][col][calDacSlopesCalibXML.RNGDATA_LACDAC]]))
lacFile.write("\n")
fleFile.write(delim.join([
str(x) for x in twr, lyr, col, face,\
dacData[twr][row][online_face][col][calDacSlopesCalibXML.DACDATA_FLEDAC_SLOPE],\
dacData[twr][row][online_face][col][calDacSlopesCalibXML.DACDATA_FLEDAC_OFFSET],\
rngData[twr][row][online_face][col][calDacSlopesCalibXML.RNGDATA_FLEDAC]]))
fleFile.write("\n")
fheFile.write(delim.join([
str(x) for x in twr, lyr, col, face,\
dacData[twr][row][online_face][col][calDacSlopesCalibXML.DACDATA_FHEDAC_SLOPE],\
dacData[twr][row][online_face][col][calDacSlopesCalibXML.DACDATA_FHEDAC_OFFSET],\
rngData[twr][row][online_face][col][calDacSlopesCalibXML.RNGDATA_FHEDAC]]))
fheFile.write("\n")
for rng in range(calConstant.NUM_RNG-1): # only process first 3 ranges
uldFile.write(delim.join([
str(x) for x in twr, lyr, col, face, rng,
uldData[rng][twr][row][online_face][col][calDacSlopesCalibXML.ULDDATA_SLOPE],\
uldData[rng][twr][row][online_face][col][calDacSlopesCalibXML.ULDDATA_OFFSET],\
rngData[twr][row][online_face][col][calDacSlopesCalibXML.RNGDATA_ULDDAC_LEX8+rng],\
uldData[rng][twr][row][online_face][col][calDacSlopesCalibXML.ULDDATA_SAT]]))
uldFile.write("\n")
|
py | 1a36e3bdd524430510b0839c624a295e4b8752ad | def facebook_test_users():
return [
{
'name': 'Unittest Jacobs',
'installed': True,
'permissions': [],
'friends': ['Unittest Deschain','Unittest Billows']
},
{
'name': 'Unittest Deschain',
'installed': True,
'permissions': [],
'friends': ['Unittest Jacobs','Unittest Billows']
},
{
'name': 'Unittest Billows',
'installed': True,
'permissions': [],
'friends': ['Unittest Deschain', 'Unittest Jacobs']
},
]
|
py | 1a36e3c80a13f91e37e4d90b7ae47c7e0d204144 | _base_ = './dnl_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
py | 1a36e403de46b3cb86657670ed508ed0d879ca0e | # Generated by Django 3.0.6 on 2020-05-19 18:28
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
py | 1a36e427984d1f62a343b4e3eaee5ee444c6d8e2 | from django.apps import AppConfig
class ModelLocationConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'model_location'
verbose_name = 'location'
|
py | 1a36e5421ecd1fd3c6594083c9d46af65f4e7b6a | """Tests for the Wemo light entity via the bridge."""
import pytest
import pywemo
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.components.wemo.light import MIN_TIME_BETWEEN_SCANS
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from . import entity_test_helpers
from tests.async_mock import create_autospec, patch
@pytest.fixture
def pywemo_model():
"""Pywemo Bridge models use the light platform (WemoLight class)."""
return "Bridge"
# Note: The ordering of where the pywemo_bridge_light comes in test arguments matters.
# In test methods, the pywemo_bridge_light fixture argument must come before the
# wemo_entity fixture argument.
@pytest.fixture(name="pywemo_bridge_light")
def pywemo_bridge_light_fixture(pywemo_device):
"""Fixture for Bridge.Light WeMoDevice instances."""
light = create_autospec(pywemo.ouimeaux_device.bridge.Light, instance=True)
light.uniqueID = pywemo_device.serialnumber
light.name = pywemo_device.name
light.bridge = pywemo_device
light.state = {"onoff": 0}
pywemo_device.Lights = {pywemo_device.serialnumber: light}
return light
def _bypass_throttling():
"""Bypass the util.Throttle on the update_lights method."""
utcnow = dt_util.utcnow()
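# Each call to the patched utcnow() below advances the clock by MIN_TIME_BETWEEN_SCANS, so the
# Throttle wrapper on the wemo light update path always sees enough elapsed time and never skips an update.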
def increment_and_return_time():
nonlocal utcnow
utcnow += MIN_TIME_BETWEEN_SCANS
return utcnow
return patch("homeassistant.util.utcnow", side_effect=increment_and_return_time)
async def test_async_update_locked_multiple_updates(
hass, pywemo_registry, pywemo_bridge_light, wemo_entity, pywemo_device
):
"""Test that two state updates do not proceed at the same time."""
pywemo_device.bridge_update.reset_mock()
with _bypass_throttling():
await entity_test_helpers.test_async_update_locked_multiple_updates(
hass,
pywemo_registry,
wemo_entity,
pywemo_device,
update_polling_method=pywemo_device.bridge_update,
)
async def test_async_update_with_timeout_and_recovery(
hass, pywemo_bridge_light, wemo_entity, pywemo_device
):
"""Test that the entity becomes unavailable after a timeout, and that it recovers."""
await entity_test_helpers.test_async_update_with_timeout_and_recovery(
hass, wemo_entity, pywemo_device
)
async def test_async_locked_update_with_exception(
hass, pywemo_bridge_light, wemo_entity, pywemo_device
):
"""Test that the entity becomes unavailable when communication is lost."""
with _bypass_throttling():
await entity_test_helpers.test_async_locked_update_with_exception(
hass,
wemo_entity,
pywemo_device,
update_polling_method=pywemo_device.bridge_update,
)
async def test_light_update_entity(
hass, pywemo_registry, pywemo_bridge_light, wemo_entity
):
"""Verify that the light performs state updates."""
await async_setup_component(hass, HA_DOMAIN, {})
# On state.
pywemo_bridge_light.state = {"onoff": 1}
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
assert hass.states.get(wemo_entity.entity_id).state == STATE_ON
# Off state.
pywemo_bridge_light.state = {"onoff": 0}
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
assert hass.states.get(wemo_entity.entity_id).state == STATE_OFF
|
py | 1a36e54c89ac5631c4a5e1009a7649ac1997c543 | # This is a module that we can import from another file
def sumar(x, y):
print(x+y)
def restar(x, y):
print(x-y) |
py | 1a36e6347b3505f9784231c1d6d90a5eab2a552f | # coding: utf-8
# Copyright © 2019 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# [http://www.apache.org/licenses/LICENSE-2.0]
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
############# This file is auto-generated. Do not edit! #############
"""
SDC Service: Ingest API
Use the Ingest service in Splunk Cloud Services to send event and metrics data, or upload a static file, to Splunk Cloud Services.
OpenAPI spec version: v1beta2.2 (recommended default)
Generated by: https://openapi-generator.tech
"""
from requests import Response
from string import Template
from typing import List, Dict
from splunk_sdk.base_client import handle_response
from splunk_sdk.base_service import BaseService
from splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel
from splunk_sdk.ingest.v1beta2.gen_models import Error
from splunk_sdk.ingest.v1beta2.gen_models import Event
from splunk_sdk.ingest.v1beta2.gen_models import HTTPResponse
from splunk_sdk.ingest.v1beta2.gen_models import List
from splunk_sdk.ingest.v1beta2.gen_models import MetricEvent
class IngestAPI(BaseService):
"""
Ingest API
Version: v1beta2.2
Use the Ingest service in Splunk Cloud Services to send event and metrics data, or upload a static file, to Splunk Cloud Services.
"""
def __init__(self, base_client):
super().__init__(base_client)
def post_events(self, event: List[Event] = None, query_params: Dict[str, object] = None) -> HTTPResponse:
"""
Sends events.
"""
if query_params is None:
query_params = {}
path_params = {
}
path = Template("/ingest/v1beta2/events").substitute(path_params)
url = self.base_client.build_url(path)
data = [e.to_dict() for e in event]
response = self.base_client.post(url, json=data, params=query_params)
return handle_response(response, HTTPResponse)
def post_metrics(self, metric_event: List[MetricEvent] = None, query_params: Dict[str, object] = None) -> HTTPResponse:
"""
Sends metric events.
"""
if query_params is None:
query_params = {}
path_params = {
}
path = Template("/ingest/v1beta2/metrics").substitute(path_params)
url = self.base_client.build_url(path)
data = [e.to_dict() for e in metric_event]
response = self.base_client.post(url, json=data, params=query_params)
return handle_response(response, HTTPResponse)
|
py | 1a36e7ff3507b3b768e890295aa0ba10e20c6d15 | '''Utilities relating to interaction with service plans
************************************************************************
FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
SHOULD ALSO BE APPLIED TO sdk_plan IN ANY OTHER PARTNER REPOS
************************************************************************
'''
import json
import logging
import os.path
import traceback
import retrying
import sdk_cmd
import sdk_utils
TIMEOUT_SECONDS = 15 * 60
SHORT_TIMEOUT_SECONDS = 30
log = logging.getLogger(__name__)
def get_deployment_plan(service_name, timeout_seconds=TIMEOUT_SECONDS):
return get_plan(service_name, 'deploy', timeout_seconds)
def get_recovery_plan(service_name, timeout_seconds=TIMEOUT_SECONDS):
return get_plan(service_name, 'recovery', timeout_seconds)
def list_plans(service_name, timeout_seconds=TIMEOUT_SECONDS):
return sdk_cmd.service_request('GET', service_name, '/v1/plans', timeout_seconds=timeout_seconds).json()
def get_plan(service_name, plan, timeout_seconds=TIMEOUT_SECONDS):
# We need to DIY error handling/retry because the query will return 417 if the plan has errors.
@retrying.retry(
wait_fixed=1000,
stop_max_delay=timeout_seconds*1000)
def wait_for_plan():
response = sdk_cmd.service_request(
'GET', service_name, '/v1/plans/{}'.format(plan),
retry=False,
raise_on_error=False)
if response.status_code == 417:
return response # avoid throwing, return plan with errors
response.raise_for_status()
return response
return wait_for_plan().json()
def start_plan(service_name, plan, parameters=None):
sdk_cmd.service_request(
'POST', service_name, '/v1/plans/{}/start'.format(plan),
json=parameters if parameters is not None else {})
def wait_for_completed_recovery(service_name, timeout_seconds=TIMEOUT_SECONDS):
return wait_for_completed_plan(service_name, 'recovery', timeout_seconds)
def wait_for_in_progress_recovery(service_name, timeout_seconds=TIMEOUT_SECONDS):
return wait_for_in_progress_plan(service_name, 'recovery', timeout_seconds)
def wait_for_kicked_off_deployment(service_name, timeout_seconds=TIMEOUT_SECONDS):
return wait_for_kicked_off_plan(service_name, 'deploy', timeout_seconds)
def wait_for_kicked_off_recovery(service_name, timeout_seconds=TIMEOUT_SECONDS):
return wait_for_kicked_off_plan(service_name, 'recovery', timeout_seconds)
def wait_for_completed_deployment(service_name, timeout_seconds=TIMEOUT_SECONDS):
return wait_for_completed_plan(service_name, 'deploy', timeout_seconds)
def wait_for_completed_plan(service_name, plan_name, timeout_seconds=TIMEOUT_SECONDS):
return wait_for_plan_status(service_name, plan_name, 'COMPLETE', timeout_seconds)
def wait_for_completed_phase(service_name, plan_name, phase_name, timeout_seconds=TIMEOUT_SECONDS):
return wait_for_phase_status(service_name, plan_name, phase_name, 'COMPLETE', timeout_seconds)
def wait_for_completed_step(service_name, plan_name, phase_name, step_name, timeout_seconds=TIMEOUT_SECONDS):
return wait_for_step_status(service_name, plan_name, phase_name, step_name, 'COMPLETE', timeout_seconds)
def wait_for_kicked_off_plan(service_name, plan_name, timeout_seconds=TIMEOUT_SECONDS):
return wait_for_plan_status(service_name, plan_name, ['STARTING', 'IN_PROGRESS'], timeout_seconds)
def wait_for_in_progress_plan(service_name, plan_name, timeout_seconds=TIMEOUT_SECONDS):
return wait_for_plan_status(service_name, plan_name, 'IN_PROGRESS', timeout_seconds)
def wait_for_starting_plan(service_name, plan_name, timeout_seconds=TIMEOUT_SECONDS):
return wait_for_plan_status(service_name, plan_name, 'STARTING', timeout_seconds)
def wait_for_plan_status(service_name, plan_name, status, timeout_seconds=TIMEOUT_SECONDS):
'''Wait for a plan to have one of the specified statuses'''
if isinstance(status, str):
statuses = [status, ]
else:
statuses = status
@retrying.retry(
wait_fixed=1000,
stop_max_delay=timeout_seconds*1000,
retry_on_result=lambda res: not res)
def fn():
plan = get_plan(service_name, plan_name, SHORT_TIMEOUT_SECONDS)
log.info('Waiting for {} plan to have {} status:\n{}'.format(
plan_name, status, plan_string(plan_name, plan)))
if plan and plan['status'] in statuses:
return plan
else:
return False
return fn()
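# Example (service name is hypothetical): wait_for_plan_status('hello-world', 'deploy', ['STARTING', 'IN_PROGRESS'])
# blocks until the deploy plan reaches either status, retrying once per second up to timeout_seconds.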
def wait_for_phase_status(service_name, plan_name, phase_name, status, timeout_seconds=TIMEOUT_SECONDS):
@retrying.retry(
wait_fixed=1000,
stop_max_delay=timeout_seconds*1000,
retry_on_result=lambda res: not res)
def fn():
plan = get_plan(service_name, plan_name, SHORT_TIMEOUT_SECONDS)
phase = get_phase(plan, phase_name)
log.info('Waiting for {}.{} phase to have {} status:\n{}'.format(
plan_name, phase_name, status, plan_string(plan_name, plan)))
if phase and phase['status'] == status:
return plan
else:
return False
return fn()
def wait_for_step_status(service_name, plan_name, phase_name, step_name, status, timeout_seconds=TIMEOUT_SECONDS):
@retrying.retry(
wait_fixed=1000,
stop_max_delay=timeout_seconds*1000,
retry_on_result=lambda res: not res)
def fn():
plan = get_plan(service_name, plan_name, SHORT_TIMEOUT_SECONDS)
step = get_step(get_phase(plan, phase_name), step_name)
log.info('Waiting for {}.{}.{} step to have {} status:\n{}'.format(
plan_name, phase_name, step_name, status, plan_string(plan_name, plan)))
if step and step['status'] == status:
return plan
else:
return False
return fn()
def recovery_plan_is_empty(service_name):
plan = get_recovery_plan(service_name)
return len(plan['phases']) == 0 and len(plan['errors']) == 0 and plan['status'] == 'COMPLETE'
def get_phase(plan, name):
return get_child(plan, 'phases', name)
def get_step(phase, name):
return get_child(phase, 'steps', name)
def get_child(parent, children_field, name):
if parent is None:
return None
for child in parent[children_field]:
if child['name'] == name:
return child
return None
def plan_string(plan_name, plan):
if plan is None:
return '{}=NULL!'.format(plan_name)
def phase_string(phase):
''' Formats the phase output as follows:
deploy STARTING:
- node-deploy STARTING: node-0:[server]=STARTING, node-1:[server]=PENDING, node-2:[server]=PENDING
- node-other PENDING: somestep=PENDING
- errors: foo, bar
'''
return '\n- {} {}: {}'.format(
phase['name'],
phase['status'],
', '.join('{}={}'.format(step['name'], step['status']) for step in phase['steps']))
plan_str = '{} {}:{}'.format(
plan_name,
plan['status'],
''.join(phase_string(phase) for phase in plan['phases']))
if plan.get('errors', []):
plan_str += '\n- errors: {}'.format(', '.join(plan['errors']))
return plan_str
|
py | 1a36ea12e86c8a21acacc241978c66be9c625277 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
inputs = program_config.inputs
weights = program_config.weights
attrs = [
program_config.ops[i].attrs for i in range(len(program_config.ops))
]
if inputs['input_data'].shape[
1] != weights['conv2d_weight'].shape[1] * attrs[0]['groups']:
return False
if attrs[0]['groups'] <= 1:
return False
ver = paddle_infer.get_trt_compile_version()
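# get_trt_compile_version() returns (major, minor, patch); fold it into one number, e.g.
# TensorRT 6.0.1 -> 6*1000 + 0*100 + 1*10 = 6010, so any value below 7000 means a pre-7.0 TensorRT.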
if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 7000:
if attrs[0]['padding_algorithm'] == 'SAME' and (
attrs[0]['strides'][0] > 1 or attrs[0]['strides'][1] > 1):
return False
return True
def sample_program_configs(self):
self.trt_param.workspace_size = 1073741824
def generate_input1(batch, attrs: List[Dict[str, Any]]):
return np.ones([batch, attrs[0]['groups'] * 3, 64,
64]).astype(np.float32)
def generate_weight1(attrs: List[Dict[str, Any]]):
return np.random.random([24, 3, 3, 3]).astype(np.float32)
def generate_weight2(attrs: List[Dict[str, Any]]):
return np.random.random([24, 1, 1]).astype(np.float32)
for batch in [1, 4]:
for strides in [[1, 1], [2, 2], [1, 2]]:
for paddings in [[0, 3], [1, 2, 3, 4]]:
for groups in [2, 3]:
for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']:
for dilations in [[1, 1], [2, 2], [1, 2]]:
for data_format in ['NCHW']:
dics = [{
"data_fromat": data_format,
"dilations": dilations,
"padding_algorithm": padding_algorithm,
"groups": groups,
"paddings": paddings,
"strides": strides,
"data_format": data_format
}, {
"axis": 1
}]
ops_config = [{
"op_type": "conv2d",
"op_inputs": {
"Input": ["input_data"],
"Filter": ["conv2d_weight"]
},
"op_outputs": {
"Output": ["conv_output_data"]
},
"op_attrs": dics[0]
}, {
"op_type": "elementwise_add",
"op_inputs": {
"X": ["conv_output_data"],
"Y": ["elementwise_weight"]
},
"op_outputs": {
"Out": ["output_data"]
},
"op_attrs": dics[1]
}]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={
"conv2d_weight":
TensorConfig(data_gen=partial(
generate_weight1, dics)),
"elementwise_weight":
TensorConfig(data_gen=partial(
generate_weight2, dics))
},
inputs={
"input_data":
TensorConfig(data_gen=partial(
generate_input1, batch, dics))
},
outputs=["output_data"])
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
input_groups = attrs[0]['groups'] * 3
self.dynamic_shape.min_input_shape = {
"input_data": [1, input_groups, 32, 32],
"output_data": [1, 24, 32, 32]
}
self.dynamic_shape.max_input_shape = {
"input_data": [4, input_groups, 64, 64],
"output_data": [4, 24, 64, 64]
}
self.dynamic_shape.opt_input_shape = {
"input_data": [1, input_groups, 64, 64],
"output_data": [1, 24, 64, 64]
}
def clear_dynamic_shape():
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape):
return 1, 2
attrs = [
program_config.ops[i].attrs for i in range(len(program_config.ops))
]
# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Int8
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5)
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Int8
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5)
def test(self):
self.run_test()
def test_quant(self):
self.run_test(quant=True)
if __name__ == "__main__":
unittest.main()
|
py | 1a36ea1bd54e39368a4259707f4b8904a5c97709 | """
*Block Lastline Horizontal-Alignment*
"""
__all__ = ["TextAlignLastLine"]
from enum import Enum
class TextAlignLastLineOption(
Enum,
):
Auto = "auto"
Start = "start"
End = "end"
Left = "left"
Center = "center"
Right = "right"
Justify = "justify"
class TextAlignLastLine(
BlockElementOperator,
):
pass
|
py | 1a36eab3bf10ab3a2d5ab44f088cb0b6e4429664 | """
Build a simple factorial bundle using MuAPI and run it.
Run with:
PYTHONPATH=$PYPY_MU:$PYTHONPATH LIBRARY_PATH=$MU/cbinding:$LIBRARY_PATH python target_rmu_bundlebuilding.py
OR:
rpython target_rmu_bundlebuilding.py
LD_LIBRARY_PATH=$MU/cbinding:$LD_LIBRARY_PATH ./target_rmu_bundlebuilding-c
"""
from rpython.rlib.rmu_genc import *
fac_bundle = """
.typedef @i1 = int<1>
.typedef @i8 = int<8>
.typedef @i32 = int<32>
.typedef @i64 = int<64>
.typedef @uptri8 = uptr<@i8>
.typedef @uptruptri8 = uptr<@uptri8>
.const @0_i64 <@i64> = 0
.const @1_i64 <@i64> = 1
.const @10_i64 <@i64> = 10
.global @gblresult <@i64>
.funcsig @sig_i64_i64 = (@i64) -> (@i64)
.funcdef @fac VERSION %v1 <@sig_i64_i64> {
%blk0(<@i64> %n_0):
%v5 = EQ <@i64> %n_0 @0_i64
%v6 = EQ <@i64> %n_0 @1_i64
%v7 = OR <@i1> %v5 %v6
BRANCH2 %v7 %blk2(@1_i64) %blk1(%n_0)
%blk1(<@i64> %n_1):
%v8 = SUB <@i64> %n_1 @1_i64
%v9 = CALL <@sig_i64_i64> @fac (%v8)
%v10 = MUL <@i64> %n_1 %v9
BRANCH %blk2(%v10)
%blk2(<@i64> %v11):
RET (%v11)
}
.funcsig @sig_i32uptruptri8_ = (@i32 @uptruptri8) -> ()
.funcdef @main VERSION %v1 <@sig_i32uptruptri8_> {
%blk0(<@i32> %argc <@uptruptri8> %argv):
%res = CALL <@sig_i64_i64> @fac (@10_i64)
STORE <@i64> @gblresult %res
COMMINST @uvm.thread_exit
}
"""
def main_load(args):
# Load the bundle and run, verify its correctness
mu = MuVM("vmLog=ERROR")
ctx = mu.new_context()
ctx.load_bundle(fac_bundle)
# Get handle to @main function, and execute it
main_id = ctx.id_of("@main")
main_h = ctx.handle_from_func(main_id)
stack_h = ctx.new_stack(main_h)
thread_h = ctx.new_thread_nor(stack_h, lltype.nullptr(MuValue.TO), [])
mu.execute()
# Load result from global cell
gbl_id = ctx.id_of("@gblresult")
gbl_h = ctx.handle_from_global(gbl_id)
res_h = ctx.load(MuMemOrd.NOT_ATOMIC, gbl_h)
res = ctx.handle_to_sint64(res_h)
print "fac(10) = %d" % res
return 0
def main_build(args):
mu = MuVM("vmLog=ERROR")
ctx = mu.new_context()
bldr = ctx.new_ir_builder()
i1 = bldr.gen_sym("@i1")
bldr.new_type_int(i1, 1)
i8 = bldr.gen_sym("@i8")
bldr.new_type_int(i8, 8)
i32 = bldr.gen_sym("@i32")
bldr.new_type_int(i32, 32)
i64 = bldr.gen_sym("@i64")
bldr.new_type_int(i64, 64)
uptri8 = bldr.gen_sym("@uptri8")
bldr.new_type_uptr(uptri8, i8)
uptruptri8 = bldr.gen_sym("@uptruptri8")
bldr.new_type_uptr(uptruptri8, uptri8)
c_0_i64 = bldr.gen_sym("@0_i64")
bldr.new_const_int(c_0_i64, i64, 0)
c_1_i64 = bldr.gen_sym("@1_i64")
bldr.new_const_int(c_1_i64, i64, 1)
c_10_i64 = bldr.gen_sym("@10_64")
bldr.new_const_int(c_10_i64, i64, 10)
gblres = bldr.gen_sym("@gblresult")
bldr.new_global_cell(gblres, i64)
# ----
# fac
sig_i64_i64 = bldr.gen_sym("@sig_i64_i64")
bldr.new_funcsig(sig_i64_i64, [i64], [i64])
fac = bldr.gen_sym("@fac")
bldr.new_func(fac, sig_i64_i64)
fac_v1 = bldr.gen_sym()
blk0 = bldr.gen_sym()
blk1 = bldr.gen_sym()
blk2 = bldr.gen_sym()
fac_v1 = bldr.new_func_ver(fac_v1, fac, [blk0, blk1, blk2])
# blk0
n_0 = bldr.gen_sym()
v5 = bldr.gen_sym()
v6 = bldr.gen_sym()
v7 = bldr.gen_sym()
blk0_cmp0 = bldr.gen_sym()
blk0_cmp1 = bldr.gen_sym()
blk0_or = bldr.gen_sym()
blk0_br2 = bldr.gen_sym()
blk0_br2_t = bldr.gen_sym()
blk0_br2_f = bldr.gen_sym()
bldr.new_bb(blk0, [n_0], [i64], MU_NO_ID, [blk0_cmp0, blk0_cmp1, blk0_or, blk0_br2])
bldr.new_cmp(blk0_cmp0, v5, MuCmpOptr.EQ, i64, n_0, c_0_i64)
bldr.new_cmp(blk0_cmp1, v6, MuCmpOptr.EQ, i64, n_0, c_1_i64)
bldr.new_binop(blk0_or, v7, MuBinOptr.OR, i1, v5, v6)
bldr.new_dest_clause(blk0_br2_t, blk2, [c_1_i64])
bldr.new_dest_clause(blk0_br2_f, blk1, [n_0])
bldr.new_branch2(blk0_br2, v7, blk0_br2_t, blk0_br2_f)
# blk1
n_1 = bldr.gen_sym()
v8 = bldr.gen_sym()
v9 = bldr.gen_sym()
v10 = bldr.gen_sym()
blk1_sub = bldr.gen_sym()
blk1_call = bldr.gen_sym()
blk1_mul = bldr.gen_sym()
blk1_br = bldr.gen_sym()
blk1_br_d = bldr.gen_sym()
bldr.new_bb(blk1, [n_1], [i64], MU_NO_ID, [blk1_sub, blk1_call, blk1_mul, blk1_br])
bldr.new_binop(blk1_sub, v8, MuBinOptr.SUB, i64, n_1, c_1_i64)
bldr.new_call(blk1_call, [v9], sig_i64_i64, fac, [v8])
bldr.new_binop(blk1_mul, v10, MuBinOptr.MUL, i64, n_1, v9)
bldr.new_dest_clause(blk1_br_d, blk2, [v10])
bldr.new_branch(blk1_br, blk1_br_d)
# blk2
v11 = bldr.gen_sym()
blk2_ret = bldr.gen_sym()
bldr.new_bb(blk2, [v11], [i64], MU_NO_ID, [blk2_ret])
bldr.new_ret(blk2_ret, [v11])
# ----
# main
sig_i32uptruptri8_ = bldr.gen_sym("@sig_i32uptruptri8_")
main = bldr.gen_sym("@main")
main_v1 = bldr.gen_sym("@main_v1")
bldr.new_funcsig(sig_i32uptruptri8_, [i32, uptruptri8], [])
bldr.new_func(main, sig_i32uptruptri8_)
blk0 = bldr.gen_sym()
bldr.new_func_ver(main_v1, main, [blk0])
# blk0
res = bldr.gen_sym()
blk0_call = bldr.gen_sym()
blk0_store = bldr.gen_sym()
blk0_comminst = bldr.gen_sym()
argc = bldr.gen_sym()
argv = bldr.gen_sym()
bldr.new_bb(blk0, [argc, argv], [i32, uptruptri8], MU_NO_ID, [blk0_call, blk0_store, blk0_comminst])
bldr.new_call(blk0_call, [res], sig_i64_i64, fac, [c_10_i64])
bldr.new_store(blk0_store, False, MuMemOrd.NOT_ATOMIC, i64, gblres, res)
bldr.new_comminst(blk0_comminst, [], MuCommInst.THREAD_EXIT, [], [], [], [])
bldr.load()
main_h = ctx.handle_from_func(main)
# execute bundle
# stack_h = ctx.new_stack(main_h)
# thread_h = ctx.new_thread_nor(stack_h, lltype.nullptr(MuValue.TO), [])
#
# mu.execute()
#
# # Load result from global cell
# gbl_h = ctx.handle_from_global(gblres)
# res_h = ctx.load(MuMemOrd.NOT_ATOMIC, gbl_h)
# res = ctx.handle_to_sint64(res_h)
#
# print "fac(10) = %d" % res
# make boot image
ctx.make_boot_image([i1, i8, i32, i64, uptri8, uptruptri8,
c_0_i64, c_1_i64, c_10_i64, gblres,
sig_i32uptruptri8_, sig_i64_i64, fac, main],
main_h, null(MuStackRefValue), null(MuRefValue), [], [], [], [], args[1])
mu.close() # Don't forget to close it
import sys
apilog.dump(sys.stdout)
return 0
# ----------------------------------------------------------------------------------------
main = main_build
def target(*args):
return main, None
if __name__ == "__main__":
import sys
main(sys.argv)
|
py | 1a36eb1f6caa49cd15e3f69382da247daa3c3d1e | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import copy
# External imports
from six.moves import xrange
# Bokeh imports
from bokeh.core.properties import List, String, Instance, Dict, Any, Int
from bokeh.model import Model
from bokeh.core.property.wrappers import PropertyValueList, PropertyValueDict
from bokeh.util.future import with_metaclass
# Module under test
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def large_plot(n):
from bokeh.models import (
Plot, LinearAxis, Grid, GlyphRenderer,
ColumnDataSource, DataRange1d, PanTool, ZoomInTool, ZoomOutTool, WheelZoomTool, BoxZoomTool,
BoxSelectTool, SaveTool, ResetTool
)
from bokeh.models.layouts import Column
from bokeh.models.glyphs import Line
col = Column()
objects = set([col])
for i in xrange(n):
source = ColumnDataSource(data=dict(x=[0, i + 1], y=[0, i + 1]))
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(x_range=xdr, y_range=ydr)
xaxis = LinearAxis(plot=plot)
yaxis = LinearAxis(plot=plot)
xgrid = Grid(plot=plot, dimension=0)
ygrid = Grid(plot=plot, dimension=1)
tickers = [xaxis.ticker, xaxis.formatter, yaxis.ticker, yaxis.formatter]
glyph = Line(x='x', y='y')
renderer = GlyphRenderer(data_source=source, glyph=glyph)
plot.renderers.append(renderer)
pan = PanTool()
zoom_in = ZoomInTool()
zoom_out = ZoomOutTool()
wheel_zoom = WheelZoomTool()
box_zoom = BoxZoomTool()
box_select = BoxSelectTool()
save = SaveTool()
reset = ResetTool()
tools = [pan, zoom_in, zoom_out, wheel_zoom, box_zoom, box_select, save, reset]
plot.add_tools(*tools)
col.children.append(plot)
objects |= set([
xdr, ydr,
xaxis, yaxis,
xgrid, ygrid,
renderer, renderer.view, glyph,
source, source.selected, source.selection_policy,
plot, plot.x_scale, plot.y_scale, plot.toolbar, plot.title,
box_zoom.overlay, box_select.overlay,
] + tickers + tools)
return col, objects
class TestMetaModel(object):
def setup_method(self):
from bokeh.model import MetaModel
self.metamodel = MetaModel
self.old_map = copy.copy(self.metamodel.model_class_reverse_map)
def teardown_method(self):
self.metamodel.model_class_reverse_map = self.old_map
def mkclass(self):
class Test_Class(with_metaclass(self.metamodel)):
foo = 1
return Test_Class
def test_metaclassing(self):
tclass = self.mkclass()
assert hasattr(tclass, '__view_model__')
with pytest.raises(Warning):
self.mkclass()
def test_get_class(self):
from bokeh.model import get_class
self.mkclass()
tclass = get_class('Test_Class')
assert hasattr(tclass, 'foo')
with pytest.raises(KeyError):
get_class('Imaginary_Class')
class DeepModel(Model):
child = Instance(Model)
class TestCollectModels(object):
def test_references_large(self):
root, objects = large_plot(10)
assert set(root.references()) == objects
def test_references_deep(self):
root = DeepModel()
objects = set([root])
parent = root
# in a previous implementation, about 400 would blow max
# recursion depth, so we double that and a little bit,
# here.
for i in xrange(900):
model = DeepModel()
objects.add(model)
parent.child = model
parent = model
assert set(root.references()) == objects
class SomeModelToJson(Model):
child = Instance(Model)
foo = Int()
bar = String()
class TestModel(object):
def setup_method(self):
from bokeh.model import Model
self.pObjectClass = Model
self.maxDiff = None
def test_init(self):
testObject = self.pObjectClass(id='test_id')
assert testObject.id == 'test_id'
testObject2 = self.pObjectClass()
assert testObject2.id is not None
assert set(["name", "tags", "js_property_callbacks", "subscribed_events", "js_event_callbacks"]) == testObject.properties()
assert dict(
name=None, tags=[], js_property_callbacks={}, js_event_callbacks={}, subscribed_events=[]
) == testObject.properties_with_values(include_defaults=True)
assert dict() == testObject.properties_with_values(include_defaults=False)
def test_ref(self):
testObject = self.pObjectClass(id='test_id')
assert {'type': 'Model', 'id': 'test_id'} == testObject.ref
def test_references_by_ref_by_value(self):
from bokeh.core.has_props import HasProps
from bokeh.core.properties import Instance, Int
class T(self.pObjectClass):
t = Int(0)
class Y(self.pObjectClass):
t1 = Instance(T)
class Z1(HasProps):
t2 = Instance(T)
class Z2(self.pObjectClass):
t2 = Instance(T)
class X1(self.pObjectClass):
y = Instance(Y)
z1 = Instance(Z1)
class X2(self.pObjectClass):
y = Instance(Y)
z2 = Instance(Z2)
t1, t2 = T(t=1), T(t=2)
y = Y(t1=t1)
z1, z2 = Z1(t2=t2), Z2(t2=t2)
x1 = X1(y=y, z1=z1)
x2 = X2(y=y, z2=z2)
assert x1.references() == {t1, y, t2, x1}
assert x2.references() == {t1, y, t2, z2, x2}
def test_references_in_containers(self):
from bokeh.core.properties import Int, String, Instance, List, Tuple, Dict
# XXX: can't use Y, because of:
#
# Warning: Duplicate __view_model__ declaration of 'Y' for class Y.
# Previous definition: <class 'bokeh.tests.test_objects.Y'>
class U(self.pObjectClass):
a = Int
class V(self.pObjectClass):
u1 = Instance(U)
u2 = List(Instance(U))
u3 = Tuple(Int, Instance(U))
u4 = Dict(String, Instance(U))
u5 = Dict(String, List(Instance(U)))
u1, u2, u3, u4, u5 = U(a=1), U(a=2), U(a=3), U(a=4), U(a=5)
v = V(u1=u1, u2=[u2], u3=(3, u3), u4={"4": u4}, u5={"5": [u5]})
assert v.references() == set([v, u1, u2, u3, u4, u5])
def test_to_json(self):
child_obj = SomeModelToJson(foo=57, bar="hello")
obj = SomeModelToJson(child=child_obj,
foo=42, bar="world")
json = obj.to_json(include_defaults=True)
json_string = obj.to_json_string(include_defaults=True)
assert { "child" : { "id" : child_obj.id, "type" : "SomeModelToJson" },
"id" : obj.id,
"name" : None,
"tags" : [],
'js_property_callbacks': {},
"js_event_callbacks" : {},
"subscribed_events" : [],
"foo" : 42,
"bar" : "world" } == json
assert ('{"bar":"world",' +
'"child":{"id":"%s","type":"SomeModelToJson"},' +
'"foo":42,"id":"%s","js_event_callbacks":{},"js_property_callbacks":{},' +
'"name":null,"subscribed_events":[],"tags":[]}') % (child_obj.id, obj.id) == json_string
def test_no_units_in_json(self):
from bokeh.models import AnnularWedge
obj = AnnularWedge()
json = obj.to_json(include_defaults=True)
assert 'start_angle' in json
assert 'start_angle_units' not in json
assert 'outer_radius' in json
assert 'outer_radius_units' not in json
def test_dataspec_field_in_json(self):
from bokeh.models import AnnularWedge
obj = AnnularWedge()
obj.start_angle = "fieldname"
json = obj.to_json(include_defaults=True)
assert 'start_angle' in json
assert 'start_angle_units' not in json
assert dict(units='rad', field='fieldname') == json['start_angle']
def test_dataspec_value_in_json(self):
from bokeh.models import AnnularWedge
obj = AnnularWedge()
obj.start_angle = 60
json = obj.to_json(include_defaults=True)
assert 'start_angle' in json
assert 'start_angle_units' not in json
assert dict(units='rad', value=60) == json['start_angle']
def test_list_default(self):
class HasListDefault(Model):
value = List(String, default=["hello"])
obj = HasListDefault()
assert obj.value == obj.value
# 'value' should not be included because we haven't modified it
assert 'value' not in obj.properties_with_values(include_defaults=False)
# (but should be in include_defaults=True)
assert 'value' in obj.properties_with_values(include_defaults=True)
obj.value.append("world")
# 'value' should now be included
assert 'value' in obj.properties_with_values(include_defaults=False)
def test_dict_default(self):
class HasDictDefault(Model):
value = Dict(String, Int, default=dict(hello=42))
obj = HasDictDefault()
assert obj.value == obj.value
assert dict(hello=42) == obj.value
# 'value' should not be included because we haven't modified it
assert 'value' not in obj.properties_with_values(include_defaults=False)
# (but should be in include_defaults=True)
assert 'value' in obj.properties_with_values(include_defaults=True)
obj.value['world'] = 57
# 'value' should now be included
assert 'value' in obj.properties_with_values(include_defaults=False)
assert dict(hello=42, world=57) == obj.value
def test_func_default_with_counter(self):
counter = dict(value=0)
def next_value():
counter['value'] += 1
return counter['value']
class HasFuncDefaultInt(Model):
value = Int(default=next_value)
obj1 = HasFuncDefaultInt()
obj2 = HasFuncDefaultInt()
assert obj1.value+1 == obj2.value
# 'value' is a default, but it gets included as a
# non-default because it's unstable.
assert 'value' in obj1.properties_with_values(include_defaults=False)
def test_func_default_with_model(self):
class HasFuncDefaultModel(Model):
child = Instance(Model, lambda: Model())
obj1 = HasFuncDefaultModel()
obj2 = HasFuncDefaultModel()
assert obj1.child.id != obj2.child.id
# 'child' is a default, but it gets included as a
# non-default because it's unstable.
assert 'child' in obj1.properties_with_values(include_defaults=False)
class TestContainerMutation(object):
def _check_mutation(self, obj, attr, mutator, expected_event_old, expected_event_new):
result = dict(calls=[])
def record_trigger(attr, old, new_):
result['calls'].append((attr, old, new_))
obj.on_change(attr, record_trigger)
try:
actual_old = getattr(obj, attr)
assert expected_event_old == actual_old
mutator(actual_old)
assert expected_event_new == getattr(obj, attr)
finally:
obj.remove_on_change(attr, record_trigger)
assert 1 == len(result['calls'])
call = result['calls'][0]
assert attr == call[0]
assert expected_event_old == call[1]
assert expected_event_new == call[2]
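# In short: _check_mutation applies `mutator` to the wrapped container property and asserts that
# exactly one change notification fired, carrying the expected (attr, old, new) triple.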
class HasListProp(Model):
foo = List(String)
def __init__(self, **kwargs):
super(HasListProp, self).__init__(**kwargs)
class TestListMutation(TestContainerMutation):
def test_whether_included_in_props_with_values(self):
obj = HasListProp()
assert 'foo' not in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
# simply reading the property creates a new wrapper, so be
# sure that doesn't count as replacing the default
foo = obj.foo
assert foo == foo # this is to calm down flake's unused var warning
assert 'foo' not in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
# but changing the list should count as replacing the default
obj.foo.append("hello")
assert 'foo' in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
def test_assignment_maintains_owners(self):
obj = HasListProp()
old_list = obj.foo
assert isinstance(old_list, PropertyValueList)
assert 1 == len(old_list._owners)
obj.foo = ["a"]
new_list = obj.foo
assert isinstance(new_list, PropertyValueList)
assert old_list is not new_list
assert 0 == len(old_list._owners)
assert 1 == len(new_list._owners)
def test_list_delitem(self):
obj = HasListProp(foo=["a", "b", "c"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
del x[1]
self._check_mutation(obj, 'foo', mutate,
["a", "b", "c"],
["a", "c"])
def test_list_delslice(self):
obj = HasListProp(foo=["a", "b", "c", "d"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
del x[1:3]
self._check_mutation(obj, 'foo', mutate,
["a", "b", "c", "d"],
["a", "d"])
def test_list_iadd(self):
obj = HasListProp(foo=["a"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
x += ["b"]
self._check_mutation(obj, 'foo', mutate,
["a"],
["a", "b"])
def test_list_imul(self):
obj = HasListProp(foo=["a"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
x *= 3
self._check_mutation(obj, 'foo', mutate,
["a"],
["a", "a", "a"])
def test_list_setitem(self):
obj = HasListProp(foo=["a"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
x[0] = "b"
self._check_mutation(obj, 'foo', mutate,
["a"],
["b"])
def test_list_setslice(self):
obj = HasListProp(foo=["a", "b", "c", "d"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
x[1:3] = ["x"]
self._check_mutation(obj, 'foo', mutate,
["a", "b", "c", "d"],
["a", "x", "d"])
def test_list_append(self):
obj = HasListProp()
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.append("bar"), [], ["bar"])
def test_list_extend(self):
obj = HasListProp()
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.extend(["x", "y"]), [], ["x", "y"])
def test_list_insert(self):
obj = HasListProp(foo=["a", "b"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.insert(1, "x"),
["a", "b"],
["a", "x", "b"])
def test_list_pop(self):
obj = HasListProp(foo=["a", "b"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.pop(),
["a", "b"],
["a"])
def test_list_remove(self):
obj = HasListProp(foo=["a", "b"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.remove("b"),
["a", "b"],
["a"])
def test_list_reverse(self):
obj = HasListProp(foo=["a", "b"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.reverse(),
["a", "b"],
["b", "a"])
def test_list_sort(self):
obj = HasListProp(foo=["b", "a"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.sort(),
["b", "a"],
["a", "b"])
class HasStringDictProp(Model):
foo = Dict(String, Any)
def __init__(self, **kwargs):
super(HasStringDictProp, self).__init__(**kwargs)
class HasIntDictProp(Model):
foo = Dict(Int, Any)
def __init__(self, **kwargs):
super(HasIntDictProp, self).__init__(**kwargs)
class TestDictMutation(TestContainerMutation):
def test_whether_included_in_props_with_values(self):
obj = HasStringDictProp()
assert 'foo' not in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
# simply reading the property creates a new wrapper, so be
# sure that doesn't count as replacing the default
foo = obj.foo
assert foo == foo # this is to calm down flake's unused var warning
assert 'foo' not in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
# but changing the dict should count as replacing the default
obj.foo['bar'] = 42
assert 'foo' in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
def test_assignment_maintains_owners(self):
obj = HasStringDictProp()
old_dict = obj.foo
assert isinstance(old_dict, PropertyValueDict)
assert 1 == len(old_dict._owners)
obj.foo = dict(a=1)
new_dict = obj.foo
assert isinstance(new_dict, PropertyValueDict)
assert old_dict is not new_dict
assert 0 == len(old_dict._owners)
assert 1 == len(new_dict._owners)
def test_dict_delitem_string(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
del x['b']
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, c=3))
def test_dict_delitem_int(self):
obj = HasIntDictProp(foo={ 1 : "a", 2 : "b", 3 : "c" })
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
del x[1]
self._check_mutation(obj, 'foo', mutate,
{ 1 : "a", 2 : "b", 3 : "c" },
{ 2 : "b", 3 : "c" })
def test_dict_setitem_string(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x['b'] = 42
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, b=42, c=3))
def test_dict_setitem_int(self):
obj = HasIntDictProp(foo={ 1 : "a", 2 : "b", 3 : "c" })
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x[2] = "bar"
self._check_mutation(obj, 'foo', mutate,
{ 1 : "a", 2 : "b", 3 : "c" },
{ 1 : "a", 2 : "bar", 3 : "c" })
def test_dict_clear(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x.clear()
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict())
def test_dict_pop(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x.pop('b')
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, c=3))
def test_dict_pop_default_works(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
assert 42 == obj.foo.pop('z', 42)
def test_dict_popitem_works(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
i = obj.foo.popitem()
assert i == ('a', 1) or i == ('b', 2) or i == ('c', 3)
# we don't _check_mutation since the end value is nondeterministic
def test_dict_setdefault(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
b = x.setdefault('b', 43)
assert 2 == b
z = x.setdefault('z', 44)
assert 44 == z
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, b=2, c=3, z=44))
def test_dict_update(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x.update(dict(b=7, c=8))
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, b=7, c=8))
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
py | 1a36eb83930c055e100a4b4a2ea95ad644fd7f28 | # MetaPrint.py
# Copyright (c) 2008-2017 Chris Gonnerman
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of the author nor the names of any contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
MetaPrint.py defines a class and utility functions for use by programs
which also use MSWinPrint.py for output. MetaPrint exposes a document
class which replicates the functionality of MSWinPrint, but rather than
actually printing, a document object collects the output generated so
that it can be replayed, either via MSWinPrint or ImagePrint. This is
useful mainly to preview a print job (by running the MetaPrint document
through ImagePrint) and subsequently actually print it (via MSWinPrint).
document is a class for creating and running print jobs. Presently, the
source is the only documentation for this class.
"""
class document:
def __init__(self, desc = None, printer = None,
papersize = "letter", orientation = "portrait", duplex = "normal"):
self.font = None
self.printer = printer
self.papersize = papersize
self.orientation = orientation
self.duplex = duplex
self.page = 0
self.pagelist = []
self.pagedata = []
if desc is not None:
self.desc = desc
else:
self.desc = "MetaPrint.py print job"
def begin_document(self, desc = None):
if desc:
self.desc = desc
def end_document(self):
if self.pagedata:
self.end_page()
def end_page(self):
if self.pagedata:
self.pagelist.append(self.pagedata)
self.pagedata = []
if self.font is not None:
self.pagedata.append(self.font)
def line(self, from_, to):
self.pagedata.append(("line", (from_, to)))
def rectangle(self, box):
self.pagedata.append(("rectangle", box))
def text(self, position, text):
self.pagedata.append(("text", (position, text)))
def setfont(self, name, size, bold = None, italic = 0):
self.font = ("font", (name, size, bold, italic))
self.pagedata.append(self.font)
def image(self, position, image, size):
self.pagedata.append(("image", (position, image, size)))
def setink(self, ink):
self.pagedata.append(("setink", (ink,)))
def setfill(self, onoff):
self.pagedata.append(("setfill", (onoff,)))
def runpage(self, doc, page):
for op, args in page:
if op == "line":
doc.line(*args)
elif op == "rectangle":
doc.rectangle(args)
elif op == "text":
doc.text(*args)
elif op == "font":
doc.setfont(*args)
elif op == "image":
doc.image(*args)
elif op == "setink":
doc.setink(*args)
elif op == "setfill":
doc.setfill(*args)
doc.end_page()
def run(self, doc, pageno = None):
if pageno is None:
for page in self.pagelist:
self.runpage(doc, page)
else:
self.runpage(doc, self.pagelist[pageno])
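

# A minimal, self-contained usage sketch of the record/replay flow described
# in the module docstring. In real use the object passed to run() would
# typically be an MSWinPrint.document (to actually print) or an
# ImagePrint.document (to preview); a second MetaPrint.document is used here
# only so the sketch runs without either of those modules installed, and the
# font name and coordinates are arbitrary illustrative values.
if __name__ == "__main__":
    src = document(desc="replay demo")
    src.setfont("Arial", 12)
    src.text((72, 72), "Hello from MetaPrint")
    src.line((72, 90), (300, 90))
    src.end_document()  # closes the open page and finishes recording

    # Replay the recorded operations into another document-like object.
    dst = document(desc="replayed copy")
    src.run(dst)

    print("recorded %d page(s), replayed %d page(s)"
        % (len(src.pagelist), len(dst.pagelist)))
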
# end of file.
|
py | 1a36eb8c349d53ceabc23949917c94c8f51c3d0f | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .get_maintenance_configuration import *
from .maintenance_configuration import *
def _register_module():
    import pulumi
    from ... import _utilities

    class Module(pulumi.runtime.ResourceModule):
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            if typ == "azure-native:maintenance/v20180601preview:MaintenanceConfiguration":
                return MaintenanceConfiguration(name, pulumi.ResourceOptions(urn=urn))
            else:
                raise Exception(f"unknown resource type {typ}")

    _module_instance = Module()
    pulumi.runtime.register_resource_module("azure-native", "maintenance/v20180601preview", _module_instance)

_register_module()
|
py | 1a36eb8c3f4e6f4d1894d59b89c98c5020c0aeca | # Generated by Django 3.1.4 on 2020-12-31 15:41
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('team', '0002_registered_task'),
    ]

    operations = [
        migrations.AlterField(
            model_name='team',
            name='manager',
            field=models.CharField(max_length=20),
        ),
    ]
|