Dataset columns (type and observed range, per the viewer statistics):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3 to 616
- content_id: string, length 40
- detected_licenses: list, length 0 to 112
- license_type: string, 2 classes
- repo_name: string, length 5 to 115
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 777 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M (nullable)
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable)
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable)
- gha_language: string, 149 classes
- src_encoding: string, 26 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 3 to 10.2M
- extension: string, 188 classes
- content: string, length 3 to 10.2M
- authors: list, length 1
- author_id: string, length 1 to 132
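The rows below follow this schema. As a minimal sketch (assuming the table is published as a Hugging Face dataset; the dataset path is a placeholder, not taken from this page), the columns could be read like this:

```python
# Hypothetical example: stream a few rows of a dataset with the columns above.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)
for i, row in enumerate(ds):
    # Each row is a dict keyed by the column names listed above.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    if i == 2:
        break
```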
2ffa97dd0cdc7445f2d8b22a44b850c4c88178f4 | c8ed3e3997475ffb27c82ee5902def1b7b6753d0 | /src/mdscripts/insertprotein.py | 012bb07cc6c6db5f117ba6f5578effb0c4a616b2 | [
"BSD-3-Clause"
]
| permissive | awacha/mdscripts | b6174c0791ad50bfae1abacdae1f5865560bb889 | 831bda06557fa2d5f0899fc2f6552c9e49146cef | refs/heads/master | 2020-03-22T10:05:49.910710 | 2019-09-27T07:30:52 | 2019-09-27T07:30:52 | 74,377,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,762 | py | #!/usr/bin/env python
import argparse
import os
import re
import subprocess
import sys
def get_areaperlipid(areafile):
with open(areafile, 'rt', encoding='utf-8') as f:
total, upperleaflet, lowerleaflet = f.readline().split()
return float(total), float(upperleaflet), float(lowerleaflet)
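# For orientation (assumed file contents, not part of the original script): the
# areaperlipid.dat file written by InflateGRO is expected to hold a single line
# with three numbers, e.g. "0.68 0.69 0.67", which the function above returns as
# the total, upper-leaflet and lower-leaflet area per lipid.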
def shrink(inputfile, shrinkfactor, lipidname, searchcutoff, shrunkfile, gridspacing, areafile):
result = subprocess.run(
['perl', 'inflategro.pl', inputfile, str(shrinkfactor), lipidname, str(searchcutoff), shrunkfile,
str(gridspacing), areafile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def inflate(inputfile, inflatedfile, inflategro, inflationfactor, lipidname, searchcutoff, gridspacing,
areafile='areaperlipid.dat'):
result = subprocess.run(
['perl', inflategro, inputfile, str(inflationfactor), lipidname, str(searchcutoff), inflatedfile,
str(gridspacing), areafile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
result.check_returncode()
except subprocess.CalledProcessError:
print(result.stdout)
print(result.stderr)
raise
return get_areaperlipid(areafile)
def find_lipid_indices(inputfile, lipidname):
"""Find the lipid indices in the .gro file"""
with open(inputfile, 'rt', encoding='utf-8') as f:
        matches = [re.match(r'\s*(?P<index>\d+)%s\s+' % lipidname, l) for l in f]
indices = {int(m.group('index')) for m in matches if m is not None}
return indices
def adjust_topology(topology, newtopology, moleculename, number):
"""Adjust the topology to have the correct number of the given molecules"""
with open(topology, 'rt', encoding='utf-8') as topin:
with open(newtopology, 'wt', encoding='utf-8') as topout:
molecules_seen = False
while topin:
l = topin.readline()
if not l:
break
            if re.match(r'\s*\[\s*molecules\s*\]', l):
                molecules_seen = True
            elif re.match(r'\s*\[\s*', l):
                molecules_seen = False
            if re.match(r'\s*%s\s+' % moleculename, l) and molecules_seen:
topout.write('{} {:d}\n'.format(moleculename, number))
else:
topout.write(l)
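# Illustrative sketch (hypothetical file contents, not from the original repo):
# given a topology whose [ molecules ] section ends with
#     Protein_A     1
#     DPPC          128
#     SOL           4000
# a call like adjust_topology('topol.top', 'topol_new.top', 'DPPC', 126) copies
# every line unchanged except the DPPC entry, which is rewritten as "DPPC 126".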
def run():
parser = argparse.ArgumentParser(description="Insert a protein by using InflateGRO")
parser.add_argument('-i', action='store', dest='inflationfactor', type=float, help='Inflation factor', default=4)
parser.add_argument('-d', action='store', dest='shrinkfactor', type=float, help='Shrinking factor', default=0.95)
parser.add_argument('-l', action='store', dest='lipidname', type=str, help='Lipid name')
parser.add_argument('-f', action='store', dest='inputfile', type=str, help='Input .gro file')
parser.add_argument('-c', action='store', dest='searchcutoff', type=float, help='Search cutoff (Ångström)',
default=14)
parser.add_argument('-g', action='store', dest='gridspacing', type=float, help='Grid spacing (Ångström)', default=5)
parser.add_argument('-t', action='store', dest='topology', type=str, help='Topology file (.top)',
default='topol.top')
parser.add_argument('-m', action='store', dest='mdpfile', type=str, help='.mdp file for energy minimization',
default='minim.mdp')
parser.add_argument('-o', action='store', dest='finalgro', type=str, help='The output .gro file',
default='confout.gro')
parser.add_argument('--inflategro', action='store', dest='inflategro', type=str,
help='path to the inflategro.pl script',
default='inflategro.pl')
# parser.add_help()
args = vars(parser.parse_args())
print(args)
if (args['lipidname'] is None) or (args['inputfile'] is None):
parser.print_help()
sys.exit(1)
# inflate the lipids
indices_pre = find_lipid_indices(args['inputfile'], args['lipidname'])
inflatedfile = os.path.splitext(args['inputfile'])[0] + '_inflated.gro'
# do a dummy inflation just to calculate the area per lipid
areaperlipid = []
areaperlipid.append(inflate(args['inputfile'], os.devnull, args['inflategro'], 1.0, args['lipidname'],
args['searchcutoff'], args['gridspacing']))
# now inflate for real.
areaperlipid.append(
inflate(args['inputfile'], inflatedfile, args['inflategro'], args['inflationfactor'], args['lipidname'],
args['searchcutoff'], args['gridspacing']))
indices = find_lipid_indices(inflatedfile, args['lipidname'])
indices_removed = [i for i in indices_pre if i not in indices]
print('{:d} lipids removed during inflation:'.format(len(indices_removed)),
', '.join(str(i) for i in indices_removed))
# update the topology
topology = os.path.splitext(args['topology'])[0] + '_shrink0.top'
adjust_topology(args['topology'], topology, args['lipidname'], len(indices))
    # do the energy minimization
minimize(inflatedfile, args['mdpfile'], topology) # -> confout.gro
i = 0
while areaperlipid[-1][0] > areaperlipid[0][0]:
i += 1
print('Shrinking step #{:d}'.format(i))
# shrink the structure
indices_pre = indices
        shrunkfile = os.path.splitext(args['inputfile'])[0] + '_shrunk{:d}.gro'.format(i)
areaperlipid.append(
inflate('confout.gro', shrunkfile, args['inflategro'], args['shrinkfactor'], args['lipidname'],
0, args['gridspacing'])
)
print('Area per lipid: {:f}'.format(areaperlipid[-1][0]))
indices = find_lipid_indices(shrunkfile, args['lipidname'])
indices_removed = [j for j in indices_pre if not j in indices]
print('{:d} lipids removed: {}'.format(len(indices_removed), ', '.join(str(x) for x in indices_removed)))
        topology = os.path.splitext(args['topology'])[0] + '_shrink{:d}.top'.format(i)
adjust_topology(args['topology'], topology, args['lipidname'], len(indices))
minimize(shrunkfile, args['mdpfile'], topology)
print('Shrinking done. Area per lipid history:')
for apl in areaperlipid:
print('{}\t{}\t{}'.format(*apl))
finaltop = os.path.splitext(args['topology'])[0] + '_insertprotein.top'
if args['finalgro'] != 'confout.gro':
os.rename('confout.gro', args['finalgro'])
adjust_topology(args['topology'], finaltop, args['lipidname'], len(indices))
os.rename(finaltop, args['topology'])
print('You can find the final structure in {}. The topology file {} has been adjusted'.format(args['finalgro'],
args['topology']))
def minimize(grofile, mdpfile, topology, tprfile='shrinking.tpr'):
print('Minimizing...')
result = subprocess.run(['gmx', 'grompp', '-c', grofile, '-f', mdpfile, '-p', topology, '-o', tprfile],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
result.check_returncode()
except subprocess.CalledProcessError:
# print(result.stdout.decode('utf-8'))
print(result.stderr.decode('utf-8'))
# print(*(result.stdout.split('\n')))
# print(*(result.stderr.split('\n')))
raise
result = subprocess.run(['gmx', 'mdrun', '-s', tprfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
result.check_returncode()
except subprocess.CalledProcessError:
# print(result.stdout.decode('utf-8'))
print(result.stderr.decode('utf-8'))
raise
| [
"[email protected]"
]
| |
e2d092698c224e507ea31bfb207d4ece530bab92 | 9b3c4a6035f137015b0f3a4836ac1ed5a83e6047 | /test/_bsd.py | 03915b1ba004cc8bf192b23e5deda6cf39a9c99d | [
"BSD-3-Clause"
]
| permissive | goodtiding5/psutil | 8b1b5e9bc439aad7f730290e8ff697006fd9bfd9 | 6892e9e0a841cba62f64e55aff6b4f8c807e314f | refs/heads/master | 2021-01-18T06:55:51.823760 | 2016-02-04T21:04:58 | 2016-02-04T21:04:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,758 | py | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO: (FreeBSD) add test for comparing connections with 'sockstat' cmd.
"""Tests specific to all BSD platforms. These are implicitly run by
test_psutil.
py."""
import datetime
import os
import subprocess
import sys
import time
import psutil
from psutil._common import BSD
from psutil._common import FREEBSD
from psutil._common import NETBSD
from psutil._common import OPENBSD
from psutil._compat import PY3
from test_psutil import get_test_subprocess
from test_psutil import MEMORY_TOLERANCE
from test_psutil import reap_children
from test_psutil import retry_before_failing
from test_psutil import sh
from test_psutil import unittest
from test_psutil import which
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
if os.getuid() == 0: # muse requires root privileges
MUSE_AVAILABLE = which('muse')
else:
MUSE_AVAILABLE = False
def sysctl(cmdline):
"""Expects a sysctl command with an argument and parse the result
returning only the value of interest.
"""
result = sh("sysctl " + cmdline)
if FREEBSD:
result = result[result.find(": ") + 2:]
elif OPENBSD or NETBSD:
result = result[result.find("=") + 1:]
try:
return int(result)
except ValueError:
return result
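# For reference, the output formats this parser assumes (illustrative values,
# not part of the original test suite):
#   FreeBSD:          "hw.ncpu: 4"   -> value taken after ": "
#   OpenBSD/NetBSD:   "hw.ncpu=4"    -> value taken after "="
# Non-numeric results (e.g. kern.boottime) fail the int() conversion and are
# returned unchanged as strings.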
def muse(field):
"""Thin wrapper around 'muse' cmdline utility."""
out = sh('muse')
for line in out.split('\n'):
if line.startswith(field):
break
else:
raise ValueError("line not found")
return int(line.split()[1])
# =====================================================================
# --- All BSD*
# =====================================================================
@unittest.skipUnless(BSD, "not a BSD system")
class BSDSpecificTestCase(unittest.TestCase):
"""Generic tests common to all BSD variants."""
@classmethod
def setUpClass(cls):
cls.pid = get_test_subprocess().pid
@classmethod
def tearDownClass(cls):
reap_children()
def test_process_create_time(self):
cmdline = "ps -o lstart -p %s" % self.pid
p = subprocess.Popen(cmdline, shell=1, stdout=subprocess.PIPE)
output = p.communicate()[0]
if PY3:
output = str(output, sys.stdout.encoding)
start_ps = output.replace('STARTED', '').strip()
start_psutil = psutil.Process(self.pid).create_time()
start_psutil = time.strftime("%a %b %e %H:%M:%S %Y",
time.localtime(start_psutil))
self.assertEqual(start_ps, start_psutil)
def test_disks(self):
# test psutil.disk_usage() and psutil.disk_partitions()
# against "df -a"
def df(path):
out = sh('df -k "%s"' % path).strip()
lines = out.split('\n')
lines.pop(0)
line = lines.pop(0)
dev, total, used, free = line.split()[:4]
if dev == 'none':
dev = ''
total = int(total) * 1024
used = int(used) * 1024
free = int(free) * 1024
return dev, total, used, free
for part in psutil.disk_partitions(all=False):
usage = psutil.disk_usage(part.mountpoint)
dev, total, used, free = df(part.mountpoint)
self.assertEqual(part.device, dev)
self.assertEqual(usage.total, total)
        # 10 MB tolerance
if abs(usage.free - free) > 10 * 1024 * 1024:
self.fail("psutil=%s, df=%s" % (usage.free, free))
if abs(usage.used - used) > 10 * 1024 * 1024:
self.fail("psutil=%s, df=%s" % (usage.used, used))
def test_cpu_count_logical(self):
syst = sysctl("hw.ncpu")
self.assertEqual(psutil.cpu_count(logical=True), syst)
def test_virtual_memory_total(self):
num = sysctl('hw.physmem')
self.assertEqual(num, psutil.virtual_memory().total)
# =====================================================================
# --- FreeBSD
# =====================================================================
@unittest.skipUnless(FREEBSD, "not a FreeBSD system")
class FreeBSDSpecificTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pid = get_test_subprocess().pid
@classmethod
def tearDownClass(cls):
reap_children()
def test_boot_time(self):
s = sysctl('sysctl kern.boottime')
s = s[s.find(" sec = ") + 7:]
s = s[:s.find(',')]
btime = int(s)
self.assertEqual(btime, psutil.boot_time())
@retry_before_failing()
def test_memory_maps(self):
out = sh('procstat -v %s' % self.pid)
maps = psutil.Process(self.pid).memory_maps(grouped=False)
lines = out.split('\n')[1:]
while lines:
line = lines.pop()
fields = line.split()
_, start, stop, perms, res = fields[:5]
map = maps.pop()
self.assertEqual("%s-%s" % (start, stop), map.addr)
self.assertEqual(int(res), map.rss)
if not map.path.startswith('['):
self.assertEqual(fields[10], map.path)
def test_exe(self):
out = sh('procstat -b %s' % self.pid)
self.assertEqual(psutil.Process(self.pid).exe(),
out.split('\n')[1].split()[-1])
def test_cmdline(self):
out = sh('procstat -c %s' % self.pid)
self.assertEqual(' '.join(psutil.Process(self.pid).cmdline()),
' '.join(out.split('\n')[1].split()[2:]))
def test_uids_gids(self):
out = sh('procstat -s %s' % self.pid)
euid, ruid, suid, egid, rgid, sgid = out.split('\n')[1].split()[2:8]
p = psutil.Process(self.pid)
uids = p.uids()
gids = p.gids()
self.assertEqual(uids.real, int(ruid))
self.assertEqual(uids.effective, int(euid))
self.assertEqual(uids.saved, int(suid))
self.assertEqual(gids.real, int(rgid))
self.assertEqual(gids.effective, int(egid))
self.assertEqual(gids.saved, int(sgid))
# --- virtual_memory(); tests against sysctl
@retry_before_failing()
def test_vmem_active(self):
syst = sysctl("vm.stats.vm.v_active_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().active, syst,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_inactive(self):
syst = sysctl("vm.stats.vm.v_inactive_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().inactive, syst,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_wired(self):
syst = sysctl("vm.stats.vm.v_wire_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().wired, syst,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_cached(self):
syst = sysctl("vm.stats.vm.v_cache_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().cached, syst,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_free(self):
syst = sysctl("vm.stats.vm.v_free_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().free, syst,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_buffers(self):
syst = sysctl("vfs.bufspace")
self.assertAlmostEqual(psutil.virtual_memory().buffers, syst,
delta=MEMORY_TOLERANCE)
# --- virtual_memory(); tests against muse
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
def test_muse_vmem_total(self):
num = muse('Total')
self.assertEqual(psutil.virtual_memory().total, num)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_muse_vmem_active(self):
num = muse('Active')
self.assertAlmostEqual(psutil.virtual_memory().active, num,
delta=MEMORY_TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_muse_vmem_inactive(self):
num = muse('Inactive')
self.assertAlmostEqual(psutil.virtual_memory().inactive, num,
delta=MEMORY_TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_muse_vmem_wired(self):
num = muse('Wired')
self.assertAlmostEqual(psutil.virtual_memory().wired, num,
delta=MEMORY_TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_muse_vmem_cached(self):
num = muse('Cache')
self.assertAlmostEqual(psutil.virtual_memory().cached, num,
delta=MEMORY_TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_muse_vmem_free(self):
num = muse('Free')
self.assertAlmostEqual(psutil.virtual_memory().free, num,
delta=MEMORY_TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_muse_vmem_buffers(self):
num = muse('Buffer')
self.assertAlmostEqual(psutil.virtual_memory().buffers, num,
delta=MEMORY_TOLERANCE)
# =====================================================================
# --- OpenBSD
# =====================================================================
@unittest.skipUnless(OPENBSD, "not an OpenBSD system")
class OpenBSDSpecificTestCase(unittest.TestCase):
def test_boot_time(self):
s = sysctl('kern.boottime')
sys_bt = datetime.datetime.strptime(s, "%a %b %d %H:%M:%S %Y")
psutil_bt = datetime.datetime.fromtimestamp(psutil.boot_time())
self.assertEqual(sys_bt, psutil_bt)
def main():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(BSDSpecificTestCase))
if FREEBSD:
test_suite.addTest(unittest.makeSuite(FreeBSDSpecificTestCase))
elif OPENBSD:
test_suite.addTest(unittest.makeSuite(OpenBSDSpecificTestCase))
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
return result.wasSuccessful()
if __name__ == '__main__':
if not main():
sys.exit(1)
| [
"[email protected]"
]
| |
f271b8ae35a2d87f5a6edfd3a2164f29bfca683e | 5781bda84c1af759e7b0284f0489d50e68044c89 | /app/model/network.py | 0fd902e34350e3e8251d9ad86c8abc47d54292d6 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
]
| permissive | Stanford-PERTS/triton | 43306a582630ac6ef8d2d14c8b2a56279335a7fb | 5a4f401fc7019d59ce4c41eafa6c5bda822fae0a | refs/heads/master | 2022-10-17T11:51:10.220048 | 2020-06-14T17:37:54 | 2020-06-14T17:37:54 | 272,251,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,984 | py | """
Network
===========
Network, one-to-one with its team, with all classroom's students participating,
comprised of growth conditions.
"""
import logging
import string
from model import SqlModel, SqlField as Field
import mysql_connection
import os_random
class InvalidNetworkAssociation(Exception):
"""Provided id(s) are circular, of the wrong kind, or otherwise invalid."""
pass
class Network(SqlModel):
table = 'network'
py_table_definition = {
'table_name': table,
'fields': [
# name, type, length, unsigned, null, default, on_update
Field('uid', 'varchar', 50, None, False, None, None),
Field('short_uid', 'varchar', 50, None, False, None, None),
Field('created', 'datetime',None, None, False, SqlModel.sql_current_timestamp, None),
Field('modified', 'datetime',None, None, False, SqlModel.sql_current_timestamp, SqlModel.sql_current_timestamp),
Field('name', 'varchar', 200, None, False, None, None),
Field('program_id', 'varchar', 50, None, False, None, None),
Field('association_ids','varchar',3500, None, False, '[]', None),
Field('code', 'varchar', 50, None, False, None, None),
],
'primary_key': ['uid'],
'indices': [
{
'unique': True,
'name': 'code',
'fields': ['code'],
},
],
'engine': 'InnoDB',
'charset': 'utf8mb4',
'collate': 'utf8mb4_unicode_ci',
}
json_props = ['association_ids']
@classmethod
def create(klass, **kwargs):
if 'code' not in kwargs:
kwargs['code'] = klass.generate_unique_code()
# else the code is specified, and if it's a duplicate, MySQL will raise
# an exception b/c there's a unique index on that field.
return super(klass, klass).create(**kwargs)
@classmethod
def generate_unique_code(klass):
chars = string.ascii_uppercase + string.digits
for x in range(5):
code = ''.join(os_random.choice(chars) for x in range(6))
matches = klass.get(code=code)
if len(matches) == 0:
break
if len(matches) > 0:
            raise Exception("After five tries, could not generate a unique "
                            "network invitation code.")
return code
@classmethod
def query_by_user(klass, user, program_id=None):
if len(user.owned_networks) == 0:
return []
query = '''
SELECT *
FROM `{table}`
WHERE `uid` IN ({ids}) {program_clause}
ORDER BY `name`
'''.format(
table=klass.table,
ids=','.join('%s' for uid in user.owned_networks),
program_clause='AND `program_id` = %s' if program_id else ''
)
params = tuple(user.owned_networks +
([program_id] if program_id else []))
with mysql_connection.connect() as sql:
row_dicts = sql.select_query(query, params)
return [klass.row_dict_to_obj(d) for d in row_dicts]
def before_put(self, init_kwargs, *args, **kwargs):
# Allow this to raise an exception to prevent bad associations from
# being saved.
self.associated_organization_ids(pending_network=self)
if self.uid in self.association_ids:
raise InvalidNetworkAssociation(
"Networks can't reference themselves: {}".format(self.uid)
)
def associated_organization_ids(self, depth=0, pending_network=None):
"""Traverse all network-to-network relationships to associated orgs.
        Returns a flat, de-duplicated set of org ids.
"""
# While we support network-to-network, this recursive function could
# generate many inefficient db calls if we get carried away.
if depth >= 4:
raise InvalidNetworkAssociation(
"Too much depth in network associations: {}"
.format(self.uid)
)
org_ids = set()
for assc_id in self.association_ids:
kind = SqlModel.get_kind(assc_id)
if kind == 'Network':
# Note! This function is often run as a before_put check that
# the associations are valid. This means we have to consider
# the as-of-yet-unsaved "root" network (the `pending_network`)
# and not any version of it we might fetch from the db in order
# to catch the introduction of circular references.
if pending_network and assc_id == pending_network.uid:
child_network = pending_network
else:
child_network = Network.get_by_id(assc_id)
if child_network:
child_org_ids = child_network.associated_organization_ids(
depth=depth + 1,
pending_network=pending_network,
)
org_ids.update(child_org_ids)
else:
# No exception here because we don't want Networks to
# become unusable if an associated thing gets deleted.
# @todo: consider having this actually remove the
# association ids from the list.
logging.warning(
"Bad reference in {}: association {} doesn't exist."
.format(self.uid, assc_id)
)
elif kind == 'Organization':
org_ids.add(assc_id)
else:
raise InvalidNetworkAssociation(
"Invalid association kind: {}".format(kind))
return org_ids
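# Worked example (hypothetical data, not part of the original model): if
#   network_a.association_ids == ['Network_b', 'Organization_1'] and
#   network_b.association_ids == ['Organization_2'],
# then network_a.associated_organization_ids() returns
#   {'Organization_1', 'Organization_2'}.
# Chains nested more than four network levels deep raise InvalidNetworkAssociation.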
| [
"[email protected]"
]
| |
d369343fe06ed20b429fd4ebd62c24898d09f1c2 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/hm-.py | 053741f131237b8d1e07af0c35648012df4dd241 | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'hM-':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
46134b5c30ca0b532262f67addad92fdbd03a9eb | 1a1b7f607c5e0783fd1c98c8bcff6460e933f09a | /core/lib/password_lib.py | 6cb7556dd99b92dd6678be4ca31f740a93006b5b | []
| no_license | smrmohammadi/freeIBS | 14fb736fcadfaea24f0acdafeafd2425de893a2d | 7f612a559141622d5042614a62a2580a72a9479b | refs/heads/master | 2021-01-17T21:05:19.200916 | 2014-03-17T03:07:15 | 2014-03-17T03:07:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,452 | py | import crypt
import random
import re
import types
import random
def getPasswords(_count,_type,_len):
"""
generate _count password of _type, and return a list of Password instances
_type(integer): password contains 1: alphabets only, 2: digits only, 3:alphabets + digits
"""
if _type==1:
chars="abcdefghijklmnopqrstuvwxyz"
elif _type==2:
chars="1234567890"
else:
chars="abcdefghijkmnpqrstuvwxyz23456789" #don't include 1&l , 0&o they are hard to distinguish
return map(lambda x:Password(generateRandomPassword(chars,_len)),range(_count))
def generateRandomPassword(chars,_len):
"""
generate a random password from characters in "chars" and length of "_len"
"""
return "".join(map(lambda x:chars[random.randint(0,len(chars)-1)],range(_len)))
class Password:
pass_chars_match=re.compile("[^A-Za-z0-9_\-]")
def __init__(self,password):
self.password=password
def __eq__(self,password_obj):
if type(password_obj)==types.StringType:
password_obj=Password(password_obj)
if self.isMd5Hash():
enc_pass=self.getMd5Crypt()
return enc_pass==password_obj.getMd5Crypt(enc_pass)
elif password_obj.isMd5Hash():
enc_pass=password_obj.getMd5Crypt()
return enc_pass==self.getMd5Crypt(enc_pass)
else:
return self.getPassword()==password_obj.getPassword()
def checkPasswordChars(self):
"""
Check Password characters
return "1" if it's OK and "0" if it's not
"""
if not len(self.password):
return 0
if self.pass_chars_match.search(self.password) != None:
return 0
return 1
def getMd5Crypt(self,salt=None):
"""
md5crypt "self.password" with "salt",
If "salt" is None,a new salt will be randomly generated and used
If "text" is already md5crypted, return it, else return crypted pass
"""
if self.isMd5Hash():
return self.password
else:
return self.__md5Crypt(salt)
def getPassword(self):
return self.password
def __md5Crypt(self,salt):
if salt==None:
salt=self.__generateRandomSalt()
return crypt.crypt(self.password,salt)
def __generateRandomSalt(self):
salt='$1$'
for i in range(8):
rand=random.randint(0,61)
if rand<10:
salt+=str(rand)
elif rand<36:
salt+=chr(rand-10+65)
else:
salt+=chr(rand-36+97)
salt += '$'
return salt
def isMd5Hash(self):
if self.password[0:3]=='$1$':
return 1
return 0 | [
"farshad_kh"
]
| farshad_kh |
7292c8b2f5ac0b7e96916f04b5a65237836d49e9 | 766ca0a00ad1df5163306d2d5a6f722bc67002d3 | /mailviews/tests/manage.py | 1549d37fb3ba441106c14033ab25cfa33112d0f1 | [
"Apache-2.0"
]
| permissive | agroptima/django-mailviews | 8999746eff926661635160eee7b743331737f0bc | b75fabadad66a697592abb98a417f6efec55a88d | refs/heads/master | 2021-01-24T12:03:52.787509 | 2019-11-13T13:49:15 | 2019-11-13T13:49:15 | 123,114,820 | 1 | 0 | Apache-2.0 | 2019-11-13T13:49:17 | 2018-02-27T10:43:48 | Python | UTF-8 | Python | false | false | 396 | py | #!/usr/bin/env python
import logging
import sys
from mailviews.tests import settings
logging.basicConfig(level=logging.DEBUG)
if __name__ == "__main__":
try:
from django.core.management import execute_manager
execute_manager(settings)
except ImportError:
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
]
| |
17c8fd8389e918362c50a26cc24b9369815a1a80 | 2dd26e031162e75f37ecb1f7dd7f675eeb634c63 | /examples/asr/asr_hybrid_transducer_ctc/speech_to_text_hybrid_rnnt_ctc_bpe.py | 2de150c7132853121bcc899167c134fc7ffb54d0 | [
"Apache-2.0"
]
| permissive | NVIDIA/NeMo | 1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1 | c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7 | refs/heads/main | 2023-08-21T15:28:04.447838 | 2023-08-21T00:49:36 | 2023-08-21T00:49:36 | 200,722,670 | 7,957 | 1,986 | Apache-2.0 | 2023-09-14T18:49:54 | 2019-08-05T20:16:42 | Python | UTF-8 | Python | false | false | 3,432 | py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Preparing the Tokenizer for the dataset
Use the `process_asr_text_tokenizer.py` script under <NEMO_ROOT>/scripts/tokenizers/ in order to prepare the tokenizer.
```sh
python <NEMO_ROOT>/scripts/tokenizers/process_asr_text_tokenizer.py \
  --manifest=<path to train manifest files, separated by commas>
  OR
  --data_file=<path to text data, separated by commas> \
--data_root="<output directory>" \
--vocab_size=<number of tokens in vocabulary> \
--tokenizer=<"spe" or "wpe"> \
--no_lower_case \
--spe_type=<"unigram", "bpe", "char" or "word"> \
--spe_character_coverage=1.0 \
--log
```
# Training the model
```sh
python speech_to_text_hybrid_rnnt_ctc_bpe.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath=<path to train manifest> \
model.validation_ds.manifest_filepath=<path to val/test manifest> \
model.tokenizer.dir=<path to directory of tokenizer (not full path to the vocab file!)> \
model.tokenizer.type=<either bpe or wpe> \
model.aux_ctc.ctc_loss_weight=0.3 \
trainer.devices=-1 \
trainer.max_epochs=100 \
model.optim.name="adamw" \
model.optim.lr=0.001 \
model.optim.betas=[0.9,0.999] \
model.optim.weight_decay=0.0001 \
    model.optim.sched.warmup_steps=2000 \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<Name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<Name of project>"
```
# Fine-tune a model
For documentation on fine-tuning this model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models import EncDecHybridRNNTCTCBPEModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(
config_path="../conf/conformer/hybrid_transducer_ctc/", config_name="conformer_hybrid_transducer_ctc_bpe"
)
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = EncDecHybridRNNTCTCBPEModel(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| [
"[email protected]"
]
| |
426c7c71d941de6f532c6347173a111373cc4734 | 99052370591eadf44264dbe09022d4aa5cd9687d | /build/cwru/cwru_base/cwru_maps/catkin_generated/pkg.installspace.context.pc.py | e51fe9087a3ee4cec49b7015d1ee43de6bdeda45 | []
| no_license | brucemingxinliu/ros_ws | 11b1a3e142132925d35b3adf929f1000392c5bdc | 45f7e553ea20b79e3e93af5f77a1b14b64184875 | refs/heads/master | 2021-01-24T03:36:47.043040 | 2018-02-26T00:53:37 | 2018-02-26T00:53:37 | 122,892,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "cwru_maps"
PROJECT_SPACE_DIR = "/home/toshiki/ros_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
]
| |
bece9a5234650c40fc71e3cf6f5df1b6f1412b8e | dcba6985b2b0d4743c2eefa44ecd0ff6dfb0e280 | /day7/note/demo_窗口滚动.py | ea934a97838f987855545fff7c0c918b1893d2a4 | []
| no_license | liyaozr/web_auto | 3c16da295ff5d6c33303f0c6176acf53f8a8cbd6 | 5a33365bfac3fc6afe07a93f9ef7935c30bc3f56 | refs/heads/master | 2021-04-13T21:49:29.677090 | 2020-04-11T07:22:17 | 2020-04-11T07:22:17 | 249,192,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Chrome(executable_path=r"d:\chromedriver.exe")
# add an implicit wait
driver.implicitly_wait(30)
driver.get("https://www.12306.cn/index/")
# e = driver.find_element_by_class_name('mr')
# scroll the element into the visible area (so it can be clicked or otherwise interacted with)
# e.location_once_scrolled_into_view
driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
time.sleep(2)
| [
"[email protected]"
]
| |
bc79a71833358f39c5740b9166e50e24b73bacfe | ccefb5c0a121509963c0f5e8b21567b527eee210 | /src/djangoflix/urls.py | b6c7948bad15eb7f6d4ec89961a110ea2e9f8cd3 | [
"MIT"
]
| permissive | susilthapa/DjangoFlix | 841c0a8aae21cb0eb41e7c5a19f11d86e83fc1ec | f4c544826a072c04b9a93e9929e327cfd130a360 | refs/heads/main | 2023-03-14T16:41:57.935530 | 2021-03-20T22:25:06 | 2021-03-20T22:25:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | """djangoflix URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/dev/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"[email protected]"
]
| |
e000bcf1bfe5e0f03b0cc8a584f325a2051a6376 | b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e | /neural/neural-005/neunet.py | 14c865c2367af10d1782c0e97d545ba6a6697690 | []
| no_license | pglen/pgpygtk | 4d1405478a714f003984cf3e3db04ff1f767470b | 33f58010e304f1a312f2356de453ecedb7aa21ef | refs/heads/master | 2021-01-22T01:18:52.238415 | 2019-01-01T01:37:24 | 2019-01-01T01:37:24 | 102,215,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,780 | py | #!/usr/bin/env python
# ------------------------------------------------------------------------
# Neural network
import sys
from neulev import *
# ------------------------------------------------------------------------
# Globals
verbose = 0
pgdebug = 0
def pn(num):
return "%+0.3f" % num
# ------------------------------------------------------------------------
# The whole net:
# __ __
# --| | /------| |
# | |---x | |-----
# --|__| \ /----|__|
# __ / __
# --| | / \----| |
# | |---x | |-----
# --|__| \------|__|
#
class neunet():
# --------------------------------------------------------------------
# neumap = Spec of the network to create. Layer description in
# tuple in the form of inputs, neurons, outputs
# Generally the number of outputs and neurons match as a neuron is
# defined as a neuron with one output
def __init__(self, neumap):
# Undo related
self.last_neuron = None
self.last_bias = self.last_bias2 = None
self.last_weight = None
self.last_post = None
# Store a copy of the parameters
self.neumap = neumap[:]
self.curr = 0 # Current Neuron in creation progress
# Create neurons
self.levarr = []
for ins, neus, outs in neumap:
if verbose:
print "creating level", self.curr
lev = neulev(self, ins, neus, outs)
self.levarr.append(lev)
self.curr += 1
# Diagnostic dump
def dump(self):
#print self
for bb in self.levarr:
print "Level ", self.curr
for cc in bb.membarr:
print " Neu:", self.curr, cc.num
for dd in cc.tentarr:
print " Tent:",
print " [ in:", pn(dd.input), "w:", pn(dd.weight), "m:", pn(dd.multi), \
"b:", pn(dd.bias), "b2:", pn(dd.bias2), "p:", pn(dd.post), "]"
print
print " ",
print "%+0.3f " % cc.output,
print
# Reverse the last poke
def undo(self):
if self.last_neuron != None:
self.last_neuron.bias = self.last_bias
self.last_neuron.parent.bias = self.last_bias2
self.last_neuron.weight = self.last_weight
self.last_neuron.post = self.last_post
self.last_neuron.multi = self.last_multi
self.last_neuron = None
else:
print "duplicate undo"
# Recalculate whole net
def fire(self):
xlen = len(self.levarr)
for bb in range(xlen-1, -1, -1):
if verbose:
print "firing level", bb
self.levarr[bb].fire()
if bb > 0:
self._transfer(self.levarr[bb], self.levarr[bb - 1])
#print
# Propagate down the net
def _transfer(self, src, targ):
if verbose:
print "transfer src", src.curr, "targ", targ.curr
nlen = len(src.membarr); tlen = len(targ.membarr[0].tentarr)
for aa in range(tlen): # tenticle loop
for bb in range(nlen): # neuron loop
if pgdebug > 3:
print " transfer ", "tent", aa, "neu", bb, "src", bb, src.membarr[bb].output
try:
targ.membarr[bb].tentarr[aa].input = src.membarr[aa].output
except:
print sys.exc_info()
def showin(self):
#print "NeuNet input:",
arr = self.levarr[len(self.levarr) - 1]
for aa in arr.membarr:
for bb in aa.tentarr:
print "%+0.3f" % bb.input,
print
def showout(self):
#print "NeuNet output:",
arr = self.levarr[0]
for aa in arr.membarr:
print "%+0.3f" % aa.output,
print
def getout(self):
ret = []; arr = self.levarr[0]
for aa in arr.membarr:
ret.append(aa.output)
return ret
def sum(self):
xsum = 0.
arr = self.levarr[len(self.levarr) - 1]
for aa in arr.membarr:
xsum += aa.output
return xsum
def randtip(self):
randmemb(self.levarr).randtip()
# --------------------------------------------------------------------
# Set input value on the basis of the data coming in
def setinputbits(self, val):
#print "setinput", val, type(val)
inparr = self.levarr[len(self.levarr)-1];
xlen = len(inparr.membarr);
xshift = 1; xx = 0.
#print "xlen", xlen
for aa in range(xlen):
if val & xshift != 0: xx = 1.
else: xx = 0.
print "bit", aa, ":", xx, " xshift ", xshift
for bb in range(xlen):
inparr.membarr[aa].tentarr[bb].input = xx
xshift <<= 1
print
def setinput(self, val, ignore = True):
#print "setinput", val, type(val)
inparr = self.levarr[len(self.levarr)-1];
xlen = len(inparr.membarr)
ylen = len(inparr.membarr[0].tentarr)
#print xlen, ylen, len(val)
if not ignore:
if xlen * ylen != len(val):
msg = "Input size must match network size of %d " % (xlen * ylen)
raise ValueError(msg)
cnt = 0
for aa in range(xlen):
for bb in range(ylen):
inparr.membarr[aa].tentarr[bb].input = val[cnt]
cnt += 1
# Compare outputs with expected data
def cmp(self, val):
diff = 0; outarr = self.levarr[0].membarr
xlen = len(outarr)
for aa in range(xlen):
diff += abs(val[aa] - outarr[aa].output)
return diff / xlen
# Train this particular input to expected output
def trainone(self, val, passes = 1000):
#print "origin:", ; neu.showout()
cnt = 0; cnt2 = 0
diff = 0.; old_sum = -100.
for aa in range(passes):
self.randtip()
self.fire()
diff = self.cmp(val)
if abs(diff) >= abs(old_sum):
#print sum
self.undo()
#self.fire()
#print "undone:",
else:
print " ", "%+0.3f " % diff,
cnt += 1
#neu.showout()
old_sum = diff
#if diff < 0.01:
# break
cnt2 += 1
print
return cnt
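# Hypothetical usage sketch (names and shapes assumed from the constructor above,
# not part of the original module):
#   net = neunet([(4, 4, 4), (4, 4, 4)])       # two levels: 4 inputs/neurons/outputs each
#   net.setinput([0.0, 1.0] * 8)               # last level takes 4 neurons x 4 tentacles = 16 values
#   net.fire()
#   net.trainone([1.0, 0.0, 0.0, 1.0], passes=500)
#   print net.getout()                         # the module targets Python 2, hence the print statement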
| [
"[email protected]"
]
| |
003afde634b2dbdf9963104880cecb922fe56bfa | c6818c06aacb1eca1fffa8bbc51b6f3aac25c177 | /acre/settings.py | 039fa2a786d7f1bc584f1052a125472bea4cb0ef | []
| no_license | Acon94/ACRE | 2d0769780c9f81eba05085ffd8b0af225666d6de | 73622a6dc4ba0f30e8d3e90b02d23c8efd14a5e1 | refs/heads/master | 2022-08-02T02:07:53.004308 | 2020-05-29T15:25:50 | 2020-05-29T15:25:50 | 267,840,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,898 | py | """
Django settings for acre project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
from django.contrib.messages import constants as messages
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '14tmc3zctdr=(@n&nwwoq#ms9v#)x-3*!#!5pl&%gi=v!0uh-k'
GOOGLE_MAPS_API_KEY = 'AIzaSyCXKJ3T-HIJwFLuS4aBq15Lg6tsiPcAXJ0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'pages.apps.PagesConfig',
'listings.apps.ListingsConfig',
'realtors.apps.RealtorsConfig',
'accounts.apps.AccountsConfig',
'contacts.apps.ContactsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'multiselectfield',
'django_google_maps',
'django.contrib.humanize',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'acre.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'acre.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'acredb',
'USER':'postgres',
        'PASSWORD': 'Oldhead@12',
'HOST':'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT= os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS =[
os.path.join(BASE_DIR, 'acre/static')
]
# media Folder settings
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
#messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
try:
from .local_settings import *
except ImportError:
pass
| [
"[email protected]"
]
| |
14583aca318c99b89df9bdf4a06f82d336f413bd | e3e5a0618b91fe58318763f2186422b95e6edd10 | /baidupcs_py/baidupcs/api.py | 68e6ac5e0ad2875ab752460896b24e449357f92e | [
"MIT"
]
| permissive | hfh1999/BaiduPCS-Py | ddd66ff4d33d0e609021280a1edc040d51654940 | 4cf77bba7afbc8c82e0bc6ecd4ffc4a66aab1c71 | refs/heads/master | 2023-02-20T06:02:08.897248 | 2021-01-26T08:56:37 | 2021-01-26T08:56:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,950 | py | from typing import Optional, Dict, List, Tuple, Callable
from io import BytesIO
import datetime
from baidupcs_py.common.io import RangeRequestIO
from baidupcs_py.baidupcs.pcs import BaiduPCS, BaiduPCSError, M3u8Type
from baidupcs_py.baidupcs.inner import (
PcsFile,
PcsMagnetFile,
PcsSharedLink,
PcsSharedPath,
FromTo,
PcsAuth,
PcsUser,
PcsQuota,
CloudTask,
)
from requests_toolbelt import MultipartEncoderMonitor
from PIL import Image
from rich import print
from rich.prompt import Prompt
class BaiduPCSApi:
def __init__(
self,
bduss: Optional[str] = None,
stoken: Optional[str] = None,
ptoken: Optional[str] = None,
cookies: Dict[str, Optional[str]] = {},
user_id: Optional[int] = None,
):
self._baidupcs = BaiduPCS(
bduss, stoken=stoken, ptoken=ptoken, cookies=cookies, user_id=user_id
)
@property
def cookies(self) -> Dict[str, Optional[str]]:
return self._baidupcs.cookies
def bdstoken(self) -> Optional[str]:
return self._baidupcs.bdstoken()
def quota(self) -> PcsQuota:
info = self._baidupcs.quota()
return PcsQuota(quota=info["quota"], used=info["used"])
def meta(self, *remotepaths: str) -> List[PcsFile]:
info = self._baidupcs.meta(*remotepaths)
return [PcsFile.from_(v) for v in info.get("list", [])]
def exists(self, remotepath: str) -> bool:
return self._baidupcs.exists(remotepath)
def is_file(self, remotepath: str) -> bool:
return self._baidupcs.is_file(remotepath)
def is_dir(self, remotepath: str) -> bool:
return self._baidupcs.is_dir(remotepath)
def list(
self,
remotepath: str,
desc: bool = False,
name: bool = False,
time: bool = False,
size: bool = False,
) -> List[PcsFile]:
info = self._baidupcs.list(
remotepath, desc=desc, name=name, time=time, size=size
)
return [PcsFile.from_(v) for v in info.get("list", [])]
def upload_file(
self,
localpath: str,
remotepath: str,
ondup="overwrite",
callback: Callable[[MultipartEncoderMonitor], None] = None,
) -> PcsFile:
info = self._baidupcs.upload_file(
localpath, remotepath, ondup=ondup, callback=callback
)
return PcsFile.from_(info)
def rapid_upload_file(
self, localpath: str, remotepath: str, ondup="overwrite"
) -> PcsFile:
info = self._baidupcs.rapid_upload_file(localpath, remotepath, ondup=ondup)
return PcsFile.from_(info)
def upload_slice(
self, buf: bytes, callback: Callable[[MultipartEncoderMonitor], None] = None
) -> str:
info = self._baidupcs.upload_slice(buf, callback=callback)
return info["md5"]
def combine_slices(
self, slice_md5s: List[str], remotepath: str, ondup="overwrite"
) -> PcsFile:
info = self._baidupcs.combine_slices(slice_md5s, remotepath, ondup=ondup)
return PcsFile.from_(info)
def search(
self, keyword: str, remotepath: str, recursive: bool = False
) -> List[PcsFile]:
info = self._baidupcs.search(keyword, remotepath, recursive=recursive)
pcs_files = []
for file_info in info["list"]:
pcs_files.append(PcsFile.from_(file_info))
return pcs_files
def makedir(self, directory: str) -> PcsFile:
info = self._baidupcs.makedir(directory)
return PcsFile.from_(info)
def move(self, *remotepaths: str) -> List[FromTo]:
info = self._baidupcs.move(*remotepaths)
r = info["extra"].get("list")
if not r:
raise BaiduPCSError("File operator [move] fails")
return [FromTo(from_=v["from"], to_=v["to"]) for v in r]
def rename(self, source: str, dest: str) -> FromTo:
info = self._baidupcs.rename(source, dest)
r = info["extra"].get("list")
if not r:
raise BaiduPCSError("File operator [rename] fails")
v = r[0]
return FromTo(from_=v["from"], to_=v["to"])
def copy(self, *remotepaths: str):
info = self._baidupcs.copy(*remotepaths)
r = info["extra"].get("list")
if not r:
raise BaiduPCSError("File operator [copy] fails")
return [FromTo(from_=v["from"], to_=v["to"]) for v in r]
def remove(self, *remotepaths: str):
self._baidupcs.remove(*remotepaths)
def magnet_info(self, magnet: str) -> List[PcsMagnetFile]:
info = self._baidupcs.magnet_info(magnet)
return [PcsMagnetFile.from_(v) for v in info["magnet_info"]]
def torrent_info(self, remote_torrent: str):
self._baidupcs.torrent_info(remote_torrent)
def add_task(self, task_url: str, remotedir: str) -> str:
info = self._baidupcs.add_task(task_url, remotedir)
return str(info["task_id"])
def tasks(self, *task_ids: str) -> List[CloudTask]:
info = self._baidupcs.tasks(*task_ids)
tasks = []
for task_id, v in info["task_info"].items():
v["task_id"] = task_id
tasks.append(CloudTask.from_(v))
return tasks
def list_tasks(self) -> List[CloudTask]:
info = self._baidupcs.list_tasks()
return [CloudTask.from_(v) for v in info["task_info"]]
def clear_tasks(self) -> int:
info = self._baidupcs.clear_tasks()
return info["total"]
def cancel_task(self, task_id: str):
self._baidupcs.cancel_task(task_id)
def share(self, *remotepaths: str, password: Optional[str] = None) -> PcsSharedLink:
info = self._baidupcs.share(*remotepaths, password=password)
link = PcsSharedLink.from_(info)._replace(
paths=list(remotepaths), password=password
)
return link
def list_shared(self, page: int = 1) -> List[PcsSharedLink]:
info = self._baidupcs.list_shared(page)
return [PcsSharedLink.from_(v) for v in info["list"]]
def shared_password(self, share_id: int) -> Optional[str]:
info = self._baidupcs.shared_password(share_id)
p = info["pwd"]
if p == "0":
return None
return p
def cancel_shared(self, *share_ids: int):
self._baidupcs.cancel_shared(*share_ids)
def access_shared(
self,
shared_url: str,
password: str,
vcode_str: str = "",
vcode: str = "",
show_vcode: bool = True,
):
while True:
try:
self._baidupcs.access_shared(shared_url, password, vcode_str, vcode)
return
except BaiduPCSError as err:
if err.error_code not in (-9, -62):
raise err
if show_vcode:
                    if err.error_code == -62:  # -62: 'a verification code may be required'
print("[yellow]Need vcode![/yellow]")
if err.error_code == -9:
print("[red]vcode is incorrect![/red]")
vcode_str, vcode_img_url = self.getcaptcha(shared_url)
img_cn = self.get_vcode_img(vcode_img_url, shared_url)
img_buf = BytesIO(img_cn)
img_buf.seek(0, 0)
img = Image.open(img_buf)
img.show()
vcode = Prompt.ask("input vcode")
else:
raise err
def getcaptcha(self, shared_url: str) -> Tuple[str, str]:
"""Return `vcode_str`, `vcode_img_url`"""
info = self._baidupcs.getcaptcha(shared_url)
return info["vcode_str"], info["vcode_img"]
def get_vcode_img(self, vcode_img_url: str, shared_url: str) -> bytes:
return self._baidupcs.get_vcode_img(vcode_img_url, shared_url)
def shared_paths(self, shared_url: str) -> List[PcsSharedPath]:
info = self._baidupcs.shared_paths(shared_url)
uk = info["uk"]
share_id = info["shareid"]
bdstoken = info["bdstoken"]
if not info.get("file_list"):
return []
return [
PcsSharedPath.from_(v)._replace(uk=uk, share_id=share_id, bdstoken=bdstoken)
for v in info["file_list"]["list"]
]
def list_shared_paths(
self, sharedpath: str, uk: int, share_id: int, bdstoken: str
) -> List[PcsSharedPath]:
info = self._baidupcs.list_shared_paths(sharedpath, uk, share_id)
return [
PcsSharedPath.from_(v)._replace(uk=uk, share_id=share_id, bdstoken=bdstoken)
for v in info["list"]
]
def transfer_shared_paths(
self,
remotedir: str,
fs_ids: List[int],
uk: int,
share_id: int,
bdstoken: str,
shared_url: str,
):
self._baidupcs.transfer_shared_paths(
remotedir, fs_ids, uk, share_id, bdstoken, shared_url
)
def user_info(self) -> PcsUser:
info = self._baidupcs.user_info()
user_id = int(info["user"]["id"])
user_name = info["user"]["name"]
info = self._baidupcs.tieba_user_info(user_id)
age = float(info["user"]["tb_age"])
sex = info["user"]["sex"]
if sex == 1:
sex = "♂"
elif sex == 2:
sex = "♀"
else:
sex = "unknown"
auth = PcsAuth(
bduss=self._baidupcs._bduss,
cookies=self.cookies,
stoken=self._baidupcs._stoken,
ptoken=self._baidupcs._ptoken,
)
quota = self.quota()
products = self.user_products()
return PcsUser(
user_id=user_id,
user_name=user_name,
auth=auth,
age=age,
sex=sex,
quota=quota,
products=products,
)
def user_products(self) -> Dict[str, str]:
info = self._baidupcs.user_products()
pds = {}
for p in info["product_infos"]:
name = p["product_name"]
t = p["end_time"] - p["start_time"]
avail = str(datetime.timedelta(seconds=t))
pds[name] = f"Left {avail}"
pds["level"] = info["level_info"]["current_level"]
return pds
def download_link(self, remotepath: str) -> str:
info = self._baidupcs.download_link(remotepath)
return info["urls"][0]["url"]
def file_stream(
self,
remotepath: str,
callback: Callable[..., None] = None,
) -> RangeRequestIO:
return self._baidupcs.file_stream(remotepath, callback=callback)
def m3u8_stream(self, remotepath: str, type: M3u8Type = "M3U8_AUTO_720") -> str:
info = self._baidupcs.m3u8_stream(remotepath, type)
if info.get("m3u8_content"):
return info["m3u8_content"]
else:
# Here should be a error
return ""
| [
"[email protected]"
]
| |
944ba56ff7aca83e2eb127f4da13c740715ee035 | f57e34d0a708ea1139f80f8e5b968c55f6fd2621 | /dassl/utils/logger.py | 9b37774ef48a52e330761d229098b3e3627aa44b | [
"MIT"
]
| permissive | MohammadJavadD/Dassl.pytorch | bfdac8f28781af5f198eb7a1318043e04dc544d3 | 5e83fdce6fb51d8d4fbe0441a016eade2ebda423 | refs/heads/master | 2022-07-06T06:33:53.655489 | 2020-05-11T20:55:24 | 2020-05-11T20:55:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | import os
import sys
import time
import os.path as osp
from .tools import mkdir_if_missing
__all__ = ['Logger', 'setup_logger']
class Logger(object):
"""Write console output to external text file.
Imported from `<https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py>`_
Args:
fpath (str): directory to save logging file.
Examples::
>>> import sys
>>> import os.path as osp
>>> save_dir = 'output/experiment-1'
>>> log_name = 'train.log'
>>> sys.stdout = Logger(osp.join(save_dir, log_name))
"""
def __init__(self, fpath=None):
self.console = sys.stdout
self.file = None
if fpath is not None:
mkdir_if_missing(osp.dirname(fpath))
self.file = open(fpath, 'w')
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
self.file.write(msg)
def flush(self):
self.console.flush()
if self.file is not None:
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
self.console.close()
if self.file is not None:
self.file.close()
def setup_logger(output=None):
if output is None:
return
if output.endswith('.txt') or output.endswith('.log'):
fpath = output
else:
fpath = osp.join(output, 'log.txt')
if osp.exists(fpath):
# make sure the existing log file is not over-written
fpath += time.strftime('-%Y-%m-%d-%H-%M-%S')
sys.stdout = Logger(fpath)
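# Minimal usage sketch (path is illustrative, not from the original package):
#   setup_logger('output/experiment-1')
#   # console output is now also written to output/experiment-1/log.txt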
| [
"[email protected]"
]
| |
3b973ffb45eaa591cd1b658a60bc480604c2573e | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2227/60668/288820.py | 84a8ef21d2e2f35a0dcb5b7d7fa5bc722b3f800e | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | def nums_6_CodeSec(n,k):
seen = set()
ans = []
def dfs(node):
for x in map(str, range(k)):
nei = node + x
if nei not in seen:
seen.add(nei)
dfs(nei[1:])
ans.append(x)
dfs("0" * (n - 1))
if n == 1 and k == 2:
print("01")
else:
print(n,k)
if __name__=='__main__':
n = int(input())
k = int(input())
nums_6_CodeSec(n,k) | [
"[email protected]"
]
| |
b37a8243749b1cbb1fb274960fb8cc5a20a84f1b | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/14195637.py | 19a1f73398d726879f251757b9c3658f6d49a240 | []
| no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/14195637.py generated: Wed, 25 Jan 2017 15:25:33
#
# Event Type: 14195637
#
# ASCII decay Descriptor: [B_c+ => (D*_s+ => (D_s+ -> K- K+ pi+) gamma) (D*(2007)~0 => (D~0 -> K+ pi-) pi0) ]CC
#
from Configurables import Generation
Generation().EventType = 14195637
Generation().SampleGenerationTool = "Special"
from Configurables import Special
Generation().addTool( Special )
Generation().Special.ProductionTool = "BcVegPyProduction"
Generation().PileUpTool = "FixedLuminosityForRareProcess"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bc_DsstDst0,Dsgamma,KKpi,D0pi0,Kpi=BcVegPy,DecProdCut,HELAMP010.dec"
Generation().Special.CutTool = "BcDaughtersInLHCb"
| [
"[email protected]"
]
| |
625e3de4d65d7963f766548a6552be5ceb7e07ad | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/types/mobile_device_constant_service.py | 3cc926700a80a75c7dc7e1493076bc54422a080b | [
"Apache-2.0"
]
| permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.services',
marshal='google.ads.googleads.v8',
manifest={
'GetMobileDeviceConstantRequest',
},
)
class GetMobileDeviceConstantRequest(proto.Message):
r"""Request message for
[MobileDeviceConstantService.GetMobileDeviceConstant][google.ads.googleads.v8.services.MobileDeviceConstantService.GetMobileDeviceConstant].
Attributes:
resource_name (str):
Required. Resource name of the mobile device
to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
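# --- Hedged usage sketch (editor addition, not part of the generated file) ---
# Constructing the request message defined above; the resource name is an
# illustrative placeholder, not a real mobile device constant.
if __name__ == '__main__':
    _example = GetMobileDeviceConstantRequest(
        resource_name='mobileDeviceConstants/600000',
    )
    print(_example.resource_name)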
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
d78dae8aa293992ac876084340178bc18620f645 | 9b617418cfadc6b6deb10c675723485ae49fb221 | /code/resources/users.py | 86659c752c29c1bed0415d2aab2b25db6338b7ac | [
"MIT"
]
| permissive | borko81/flask_udemy | 455a555b3892da6d9fee04ba53ea2408dfe23f2b | e8f9192feda1458d1ea44b62d2485e911f16acef | refs/heads/main | 2023-08-07T14:33:47.697962 | 2021-10-08T14:01:13 | 2021-10-08T14:01:13 | 411,242,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py | import sqlite3
from flask_restful import Resource, reqparse
from models.user import UserModel
class UserRegister(Resource):
"""
    Register resource, using reqparse from flask_restful.
    Validates that the username is unique before inserting it into the db.
"""
parser = reqparse.RequestParser()
parser.add_argument("username", type=str, required=True, help="Insert username")
parser.add_argument("password", type=str, required=True, help="Insert password")
def post(self):
data = UserRegister.parser.parse_args()
        # reject the request if the username is already taken
if UserModel.find_by_username(data['username']):
return {"message": "This username not allowed!"}, 400
try:
connection = sqlite3.connect("data.db")
cursor = connection.cursor()
except sqlite3.Error as er:
raise ValueError(er)
else:
query = "INSERT INTO users VALUES (NULL, ?, ?)"
try:
cursor.execute(query, (data['username'], data['password']))
except sqlite3.Error as er:
raise ValueError(er)
else:
connection.commit()
finally:
connection.close()
return {"message": "User created successfully"}, 201
if __name__ == '__main__':
u = UserModel.find_by_username("borko")
print(u)
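# --- Hedged usage sketch (editor addition) ---
# Wiring the resource above into a Flask-RESTful Api; the '/register' route is
# an illustrative choice and the app object is assumed to live elsewhere in
# the project.
#
#   from flask import Flask
#   from flask_restful import Api
#
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(UserRegister, '/register')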
| [
"[email protected]"
]
| |
64183ac4cc465a42829ec69748f9176d1f426207 | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Algorithms/Match makers/test.py | e518e3812e5879cbd0b0cddd6bd1e2e5c19ffda8 | [
"MIT"
]
| permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 606 | py | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'2',
'4',
'1 6 9 12',
'4 12 3 9',
'4',
'2 2 2 2',
'2 2 2 2',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
'2\n' +
'4\n')
if __name__ == '__main__':
unittest.main()
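# Hedged note (added comment): the test above relies on `import solution`
# executing the solution module's top-level code; builtins.input is patched to
# feed both test cases and redirect_stdout captures everything the module
# prints, so the whole program run is asserted in one call.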
| [
"[email protected]"
]
| |
2590c023d108e24d8b87283bf38c9ad7246bd708 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_testifying.py | 54d585fa8f0db28a02a10621604b7a87579812f2 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py |
from xai.brain.wordbase.verbs._testify import _TESTIFY
# class header
class _TESTIFYING(_TESTIFY):
def __init__(self,):
_TESTIFY.__init__(self)
self.name = "TESTIFYING"
self.specie = 'verbs'
self.basic = "testify"
self.jsondata = {}
| [
"[email protected]"
]
| |
072e40a242d378c1a17f9f2a3f62a08178177a55 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_202/64.py | 65c7985126860c68e994a7482a5134f0be8da6ab | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,998 | py | q = int(input())
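# Hedged note (added comment): sketch of the greedy construction in the loop
# below. Each test case reads an n x n grid with m pre-placed pieces ('x' scores
# 1 and occupies its row and column, '+' scores 1 and occupies both diagonals
# through its cell, 'o' scores 2 and occupies all four). The code then
# (1) places '+'-style pieces on cells whose two diagonals are still free,
# visiting cells that lie on the shortest diagonals first, and (2) fills the
# remaining free row/column pairs with 'x'-style pieces, finally printing the
# score, the number of changed cells and each changed cell.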
for case in range(1,q+1):
n,m = [int(x) for x in input().split()]
cols = [0]*n
rows = [0]*n
firstrow_plus = [0]*n
orgmatrix = [[0]*n for _ in range(n)]
matrix = [[0]*n for _ in range(n)]
backwards = [0]*(2*n-1)
forwards = [0]*(2*n-1)
points = 0
for _ in range(m):
c,b,a = input().split()
a = int(a)-1
b = int(b)-1
if c == 'x' or c == 'o':
cols[a] += 1
rows[b] += 1
points += 1
orgmatrix[b][a] += 1
if c == '+' or c == 'o':
c1,c2 = a+b,a-b
backwards[c2]+=1
forwards[c1]+=1
firstrow_plus[a] += 1
points += 1
orgmatrix[b][a] += 2
numbackwards = [0]*(2*n-1)
numforwards = [0]*(2*n-1)
for i in range(n):
for j in range(n):
c1,c2 = i+j,i-j
numbackwards[c2]+=1
numforwards[c1]+=1
def cover(pos):
i,j = pos
c1,c2 = i+j,i-j
return numbackwards[c2] + numforwards[c1]
poi = [(i,j) for i in range(n) for j in range(n)]
poi.sort(key = lambda x: cover(x))
for pos in poi:
i,j = pos
c1,c2 = i+j,i-j
if backwards[c2]== 0 and forwards[c1] == 0:
matrix[j][i] += 2
points += 1
backwards[c2]+=1
forwards[c1]+=1
i = 0
j = 0
while i < n and j < n:
while i < n and rows[i]>0:
i+=1
while j<n and cols[j]>0:
j+=1
if i >= n or j >= n:
continue
rows[i] += 1
cols[j] += 1
matrix[i][j] += 1
points += 1
#for j in range(n):
# if firstrow_plus[j] == 0:
# matrix[0][j] += 2
# points += 1
#for j in range(1,n-1):
# matrix[n-1][j] += 2
# points += 1
changes = 0
for i in range(n):
for j in range(n):
if matrix[i][j]>0:
changes += 1
print('Case #%d: %d %d' %(case,points,changes))
for i in range(n):
for j in range(n):
if matrix[i][j]==1:
if orgmatrix[i][j]>0:
print('o %d %d' %(i+1,j+1))
else:
print('x %d %d' %(i+1,j+1))
elif matrix[i][j]==2:
if orgmatrix[i][j]>0:
print('o %d %d' %(i+1,j+1))
else:
print('+ %d %d' %(i+1,j+1))
elif matrix[i][j]>2:
print('o %d %d' %(i+1,j+1))
#prmat = [['.']*n for _ in range(n)]
#for i in range(n):
# for j in range(n):
# dumhet = matrix[i][j] + orgmatrix[i][j]
# if dumhet == 1:
# prmat[i][j] = 'x'
# elif dumhet == 2:
# prmat[i][j] = '+'
# elif dumhet == 3:
# prmat[i][j] = 'o'
#for i in range(n):
# print(*prmat[i])
| [
"[email protected]"
]
| |
cf8a4e8ee9ab5bec84107722532d44c30bb836ac | 0494c9caa519b27f3ed6390046fde03a313d2868 | /commit-queue/tests/buildbot_json_test.py | 561e59a3ca46c6615888e6b6922332b1b8423227 | [
"BSD-3-Clause"
]
| permissive | mhcchang/chromium30 | 9e9649bec6fb19fe0dc2c8b94c27c9d1fa69da2c | 516718f9b7b95c4280257b2d319638d4728a90e1 | refs/heads/master | 2023-03-17T00:33:40.437560 | 2017-08-01T01:13:12 | 2017-08-01T01:13:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,606 | py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for buildbot_json.py."""
import json
import logging
import os
import cStringIO
import StringIO
import sys
import unittest
import urllib
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(ROOT_DIR, '..'))
import find_depot_tools # pylint: disable=W0611
from testing_support import auto_stub
# in tests/
import reduce_test_data # pylint: disable=F0401
# In root
import buildbot_json
class BuildbotJsonTest(auto_stub.TestCase):
def setUp(self):
super(BuildbotJsonTest, self).setUp()
# Default mock.
self.old_urlopen = self.mock(urllib, 'urlopen', self.mockurlopen)
self.mock(sys, 'stderr', cStringIO.StringIO())
self.mock(sys, 'stdout', cStringIO.StringIO())
self.mock(buildbot_json.time, 'time', lambda: 1325394000.01)
self.url = 'http://build.chromium.org/p/tryserver.chromium'
self.datadir = os.path.join(ROOT_DIR, 'data')
if not os.path.isdir(self.datadir):
os.mkdir(self.datadir)
self.test_id = self.id().split('BuildbotJsonTest.', 1)[1]
self.filepath = os.path.join(self.datadir, self.test_id) + '.json'
self.queue = []
self.training = False
if os.path.isfile(self.filepath):
self.queue = json.load(open(self.filepath))
# Auto upgrade old data.
for i in xrange(len(self.queue)):
url = self.queue[i][0]
if not url.endswith('filter=1'):
if '?' in url:
url += '&filter=1'
else:
url += '?filter=1'
self.queue[i][0] = url
logging.warn('Auto-convert to training because missing filter=1.')
self.training = True
self.queue_index = 0
self.reducer = reduce_test_data.Filterer()
def tearDown(self):
try:
if not self.has_failed():
if self.queue_index < len(self.queue):
self.queue = self.queue[:self.queue_index]
logging.warning('Auto-convert to training because of queue overflow')
self.training = True
if self.training:
json.dump(self.queue, open(self.filepath, 'w'), separators=(',',':'))
self.assertEqual(self.queue_index, len(self.queue))
self.assertOut('stderr', '')
self.assertOut('stdout', '')
else:
if self.training:
logging.error('Not saving data even if in training mode.')
finally:
# Make sure the super class tearDown() function is called so stubs are
# removed.
super(BuildbotJsonTest, self).tearDown()
if self.training:
self.fail(
'Don\'t worry, it\'s just updating internal files. Please run '
'again.\n%s' % '\n'.join(q[0] for q in self.queue))
def assertOut(self, out, expected):
"""Check stderr/stdout and resets it."""
self.assertEqual(str(expected), str(getattr(sys, out).getvalue()))
self.mock(sys, out, cStringIO.StringIO())
def mockurlopen(self, url):
self.assertTrue(self.queue_index <= len(self.queue))
if self.queue_index != len(self.queue):
expected_url, data = self.queue[self.queue_index]
if url != expected_url:
logging.warn(
'Auto-convert to training because %s != %s.' % (url, expected_url))
self.training = True
# Delete the remainder of the queue.
self.queue = self.queue[:self.queue_index]
if self.queue_index == len(self.queue):
data = self.old_urlopen(url).read()
self.training = True
# Re-filter it.
try:
data = json.loads(data)
except ValueError:
self.fail('Failed to decode %s' % url)
expected_url, new_data = self.reducer.filter_response(url, data)
assert new_data
new_data_json = json.dumps(new_data, separators=(',',':'))
if self.queue_index == len(self.queue):
self.queue.append((url, new_data_json))
elif new_data != data:
logging.warn(
'Auto-convert to training because url %s\n%s != %s.' % (
url, data, new_data))
self.queue[self.queue_index] = [url, new_data_json]
self.training = True
channel = StringIO.StringIO(new_data_json)
channel.headers = '<mocked headers>'
self.queue_index += 1
return channel
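  # Hedged note (added comment): mockurlopen() above implements a record/replay
  # cache -- expected (url, json) pairs are loaded from tests/data/<test_id>.json
  # in setUp(); a cache miss or mismatch flips the test into "training" mode,
  # fetches the live URL through the saved real urlopen, re-filters the response
  # and rewrites the cache file in tearDown().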
def testCommands(self):
# Assert no new command was added, otherwise a test needs to be written.
expected = [
'busy',
'builds',
'count',
'current',
'disconnected',
'help',
'idle',
'interactive',
'last_failure',
'pending',
'run',
]
actual = [i[3:] for i in dir(buildbot_json) if i.startswith('CMD')]
self.assertEqual(sorted(expected), sorted(actual))
for i in actual:
self.assertTrue(hasattr(self, 'testCMD' + i))
def testCMDbusy(self):
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDbusy(parser, [self.url, '-b', 'linux']))
filepath = os.path.join(self.datadir, self.test_id) + '_expected.txt'
if self.training or not os.path.isfile(filepath):
# pylint: disable=E1101
json.dump(sys.stdout.getvalue(), open(filepath, 'w'))
expected = json.load(open(filepath))
self.assertOut('stdout', expected)
def testCMDbuilds(self):
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDbuilds(
parser, [self.url, '-b', 'linux', '-s', 'vm146-m4', '-q']))
filepath = os.path.join(self.datadir, self.test_id) + '_expected.txt'
if self.training or not os.path.isfile(filepath):
# pylint: disable=E1101
json.dump(sys.stdout.getvalue(), open(filepath, 'w'))
expected = json.load(open(filepath))
self.assertOut('stdout', expected)
def testCMDcount(self):
self.mock(buildbot_json.time, 'time', lambda: 1348166285.56)
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDcount(
parser, [self.url, '-b', 'linux', '-o' '360']))
filepath = os.path.join(self.datadir, self.test_id) + '_expected.txt'
if self.training or not os.path.isfile(filepath):
# pylint: disable=E1101
json.dump(sys.stdout.getvalue(), open(filepath, 'w'))
expected = json.load(open(filepath))
self.assertOut('stdout', expected)
def testCMDdisconnected(self):
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDdisconnected(parser, [self.url]))
self.assertOut(
'stdout',
'vm112-m4\nvm122-m4\nvm124-m4\nvm131-m4\nvm134-m4\nvm139-m4\nvm143-m4\n'
'vm146-m4\nvm157-m4\nvm162-m4\nvm165-m4\nvm60-m4\nvm62-m4\nvm64-m4\n')
def testCMDhelp(self):
parser = buildbot_json.gen_parser()
self.assertEqual(0, buildbot_json.CMDhelp(parser, []))
# No need to check exact output here.
# pylint: disable=E1101
self.assertTrue(
'show program\'s version number and exit\n' in sys.stdout.getvalue())
self.mock(sys, 'stdout', cStringIO.StringIO())
def testCMDidle(self):
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDidle(parser, [self.url, '--builder', 'linux_clang']))
self.assertOut(
'stdout', 'Builder linux_clang: vm104-m4, vm113-m4, vm165-m4\n')
def testCMDinteractive(self):
self.mock(sys, 'stdin', cStringIO.StringIO('exit()'))
parser = buildbot_json.gen_parser()
try:
# TODO(maruel): Real testing.
buildbot_json.CMDinteractive(parser, [self.url])
self.fail()
except SystemExit:
pass
self.assertOut(
'stderr',
'Buildbot interactive console for "http://build.chromium.org'
'/p/tryserver.chromium".\nHint: Start with typing: '
'\'buildbot.printable_attributes\' or \'print str(buildbot)\' to '
'explore.\n')
self.assertOut('stdout', '>>> ')
def testCMDlast_failure(self):
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDlast_failure(
parser, [self.url, '-b', 'linux', '--step', 'compile']))
self.assertOut(
'stdout',
'27369 on vm136-m4: blame:[email protected]\n'
'27367 on vm158-m4: blame:[email protected]\n')
def testCMDpending(self):
parser = buildbot_json.gen_parser()
self.assertEqual(0, buildbot_json.CMDpending(parser, [self.url]))
self.assertOut('stdout',
"Builder linux_touch: 2\n"
" revision: HEAD\n change:\n comment: u''\n"
" who: [email protected]\n revision: HEAD\n change:\n"
" comment: u''\n who: [email protected]\n")
def testCMDcurrent(self):
parser = buildbot_json.gen_parser()
self.assertEqual(0, buildbot_json.CMDcurrent(parser, [self.url]))
filepath = os.path.join(self.datadir, self.test_id) + '_expected.txt'
if self.training or not os.path.isfile(filepath):
# pylint: disable=E1101
json.dump(sys.stdout.getvalue(), open(filepath, 'w'))
expected = json.load(open(filepath))
self.assertOut('stdout', expected)
def testCMDrun(self):
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDrun(
parser, [self.url, "print '\\n'.join(buildbot.builders.keys)"]))
self.assertOut('stdout', 'linux\nlinux_clang\nlinux_touch\n')
def testCurrentBuilds(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
actual = []
for builder in b.builders:
self.assertEqual([], list(builder.current_builds.cached_children))
i = 0
last_build = None
for c in builder.current_builds:
self.assertEqual(builder, c.builder)
actual.append(str(c))
i += 1
last_build = c
if i:
self.assertEqual(last_build.number, builder.builds[-1].number)
self.assertEqual(i, len(list(builder.current_builds.cached_children)))
builder.current_builds.discard()
self.assertEqual([], list(builder.current_builds.cached_children))
filepath = os.path.join(self.datadir, self.test_id) + '_expected.json'
if self.training or not os.path.isfile(filepath):
json.dump(actual, open(filepath, 'w'))
expected = json.load(open(filepath))
self.assertEqual(expected, actual)
def test_builds_reverse(self):
# Check the 2 last builds from 'linux' using iterall() instead of
# __iter__(). The test also confirms that the build object itself is not
# loaded.
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
actual = []
for b in b.builders['linux'].builds.iterall():
actual.append(b.number)
# When using iterall() the Build data is delay loaded:
assert b._data is None # pylint: disable=W0212
if len(actual) == 2:
break
filepath = os.path.join(self.datadir, self.test_id) + '_expected.json'
if self.training or not os.path.isfile(filepath):
json.dump(actual, open(filepath, 'w'))
expected = json.load(open(filepath))
self.assertEqual(expected, actual)
def test_build_results(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
# builds.data['results'] is not present.
self.assertEqual(
buildbot_json.SUCCESS, b.builders['linux_clang'].builds[1638].result)
self.assertEqual(
buildbot_json.SUCCESS,
b.builders['linux_clang'].builds[1638].steps[0].result)
def test_build_steps_keys(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
build = b.builders['linux_clang'].builds[1638]
#self.assertEqual([0, 1, 2, 3], build.steps.keys)
# Grab cached version. There is none.
actual = [step for step in build.steps.cached_children]
self.assertEqual([], actual)
# Force load.
actual = [step for step in build.steps]
self.assertEqual(
[buildbot_json.SUCCESS] * 4, [step.result for step in actual])
self.assertEqual(
[True] * 4, [step.simplified_result for step in actual])
self.assertEqual(4, len(actual))
# Grab cached version.
actual = [step for step in build.steps.cached_children]
self.assertEqual(
[buildbot_json.SUCCESS] * 4, [step.result for step in actual])
self.assertEqual(4, len(actual))
def test_repr(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
self.assertEqual('<Builder key=linux>', repr(b.builders['linux']))
self.assertEqual("<Builders keys=['linux']>", repr(b.builders))
def test_refresh(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
self.assertEqual(True, b.refresh())
def test_build_step_cached_data(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
build = 30157
self.assertEqual(
None, b.builders['linux'].current_builds[build].steps[0].cached_data)
b.builders['linux'].current_builds[build].steps[0].cache()
self.assertEqual(
'update_scripts',
b.builders['linux'].current_builds[build].steps[0].name)
self.assertEqual(
['browser_tests', 'ui_tests'],
b.builders['linux'].current_builds[build].steps.failed)
self.assertEqual(
2,
b.builders['linux'].current_builds[build].steps[2
].cached_data['step_number'])
b.refresh()
# cache_keys() does the same thing as cache().
b.builders['linux'].current_builds[build].steps.cache_keys()
def test_contains(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
self.assertTrue('linux' in b.builders)
self.assertEqual(3, len(list(b.builders.cached_children)))
try:
# The dereference of an invalid key when keys are cached will throw an
# exception.
# pylint: disable=W0104
b.builders['non_existent']
self.fail()
except KeyError:
pass
def test_slaves(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
self.assertEqual(11, len(b.slaves.names))
self.assertEqual(False, b.slaves['mini34-m4'].connected)
def test_build_revision(self):
class Root(object):
@staticmethod
def read(_):
return {'sourceStamp': {'revision': 321}}
build = buildbot_json.Build(Root(), '123', None)
self.assertEqual(321, build.revision)
def test_build_revision_none(self):
class Root(object):
@staticmethod
def read(_):
return {}
build = buildbot_json.Build(Root(), '123', None)
self.assertEqual(None, build.revision)
def test_build_duration(self):
class Root(object):
@staticmethod
def read(_):
return {'times': [3, 15]}
build = buildbot_json.Build(Root(), '123', None)
self.assertEqual(12, build.duration)
self.assertEqual(3, build.start_time)
self.assertEqual(15, build.end_time)
def test_build_duration_none(self):
class Root(object):
@staticmethod
def read(_):
return {}
build = buildbot_json.Build(Root(), '123', None)
self.assertEqual(None, build.duration)
self.assertEqual(None, build.start_time)
self.assertEqual(None, build.end_time)
def test_build_steps_names(self):
class Root(object):
@staticmethod
def read(url): # pylint: disable=E0213
self.assertEqual('123', url)
return {'steps': [{'name': 'a'}, {'name': 'b'}]}
build = buildbot_json.Build(Root(), '123', None)
self.assertEqual(['a', 'b'], build.steps.keys)
def test_build_step_duration(self):
class Root(object):
@staticmethod
def read(_):
return {'steps': [{'times': [3, 15], 'isStarted': True}]}
build = buildbot_json.Build(Root(), '123', None)
build_step = buildbot_json.BuildStep(buildbot_json.BuildSteps(build), 0)
self.assertEqual(12, build_step.duration)
self.assertEqual(True, build_step.is_running)
self.assertEqual(True, build_step.is_started)
self.assertEqual(False, build_step.is_finished)
def test_build_step_duration_none(self):
class Root(object):
@staticmethod
def read(_):
return {'steps': [{}]}
build = buildbot_json.Build(Root(), '123', None)
build_step = buildbot_json.BuildStep(buildbot_json.BuildSteps(build), 0)
self.assertEqual(None, build_step.duration)
if __name__ == '__main__':
logging.basicConfig(level=
[logging.WARN, logging.INFO, logging.DEBUG][min(2, sys.argv.count('-v'))])
unittest.main()
| [
"[email protected]"
]
| |
1cc1d640e21ab0f100c76bc623beac7409d3eb4f | 2b01f8f7ee7e841f64629f88896478d3b4221480 | /flatpages_i18n/middleware.py | 70654537b556fffc508c62e7155e802524587996 | [
"BSD-3-Clause"
]
| permissive | PragmaticMates/django-flatpages-i18n | fde6821774419c1a3cbafe317c5c3c91730339a5 | 434903e60518e0c1a54f0aea24e1d3e8eb0449bd | refs/heads/master | 2023-06-09T18:13:11.192887 | 2023-06-05T07:32:10 | 2023-06-05T07:32:10 | 6,462,420 | 16 | 19 | BSD-3-Clause | 2019-07-12T07:03:15 | 2012-10-30T18:40:25 | Python | UTF-8 | Python | false | false | 817 | py | # from django.conf import settings
# from django.http import Http404
# from django.utils.deprecation import MiddlewareMixin
#
# from flatpages_i18n.views import flatpage
#
#
# class FlatpageFallbackMiddleware(MiddlewareMixin):
# def process_response(self, request, response):
# if response.status_code != 404:
# # No need to check for a flatpage for non-404 responses.
# return response
# try:
# return flatpage(request, request.path_info)
# # Return the original response if any errors happened. Because this
# # is a middleware, we can't assume the errors will be caught elsewhere.
# except Http404:
# return response
# except:
# if settings.DEBUG:
# raise
# return response
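# Hedged note (added comment): the commented-out class above is Django's classic
# flatpage 404-fallback middleware pattern -- it only inspects 404 responses,
# tries to serve a flatpage for request.path_info, and returns the original
# response on errors (re-raising only when settings.DEBUG is set). It is kept
# disabled in this package.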
| [
"[email protected]"
]
| |
0feb26db0b3e1ad462a9a055b1f25937d285fe82 | 3f327d2654b85b922909925b9f475315d78f4652 | /Backend/newsapi/lib/python2.7/site-packages/newsapi/sources.py | a2865f6348bc04ca28a13159efcf5462a1d5167c | [
"MIT"
]
| permissive | brianwang1217/SelfImprovementWebApp | 8db45914027537aee9614f9d218c93cc08dc90f8 | 7892fc4ee5434307b74b14257b29a5f05a0a0dd7 | refs/heads/master | 2022-12-13T15:01:08.595735 | 2018-06-23T04:46:06 | 2018-06-23T04:46:06 | 137,548,289 | 1 | 1 | MIT | 2022-05-25T01:28:29 | 2018-06-16T02:48:52 | Python | UTF-8 | Python | false | false | 4,265 | py | from newsapi.base_news import BaseNews
class Sources(BaseNews):
def __init__(self, API_KEY):
super(Sources, self).__init__(API_KEY)
self.endpoint = "https://newsapi.org/v1/sources"
self.sources = []
self.sources_base_info = {}
self.sources_id_info = {}
self.categories = {}
self.languages = {}
self.countries = {}
def get(self, category="", language="", country="", attributes_format=True):
self.payload['category'] = category
self.payload['language'] = language
self.payload['country'] = country
r = self.requests.get(self.endpoint, params=self.payload)
if r.status_code != 200:
raise BaseException("Either server didn't respond or has resulted in zero results.")
try:
content = r.json()
except ValueError:
raise ValueError("No json data could be retrieved.")
if attributes_format:
return self.AttrDict(content)
return content
def all(self):
return self.get()
def get_by_category(self, category):
return self.get(category=category)
def get_by_language(self, language):
return self.get(language=language)
def get_by_country(self, country):
return self.get(country=country)
def information(self):
content = self.get()
self.sources = content.sources
for index, source in enumerate(self.sources):
category_name = source['category']
language_name = source['language']
country_name = source['country']
identifier = source['id']
name = source['name']
            description = source['description']
            url = source['url']
            urls_to_logos = source['urlsToLogos']
            sort_bys_available = source['sortBysAvailable']
            self.sources_base_info[name] = url
            self.sources_id_info[name] = identifier
            temp_dict = {
                "id": identifier, "name": name,
                "description": description, "url": url,
"urls_to_logos": urls_to_logos,
'sort_bys_available': sort_bys_available
}
            if category_name in self.categories:
                self.categories[category_name].append(temp_dict)
            else:
                self.categories[category_name] = [temp_dict]
            if language_name in self.languages:
                self.languages[language_name].append(temp_dict)
            else:
                self.languages[language_name] = [temp_dict]
            if country_name in self.countries:
                self.countries[country_name].append(temp_dict)
            else:
                self.countries[country_name] = [temp_dict]
return self
def all_sorted_information(self):
return self.sources
def all_categories(self, detailed=False):
if detailed:
return self.categories
return self.categories.keys()
def all_languages(self, detailed=False):
if detailed:
return self.languages
return self.languages.keys()
def all_countries(self, detailed=False):
if detailed:
return self.countries
return self.countries.keys()
def all_base_information(self):
return self.sources_base_info
def all_ids(self, detailed=False):
if detailed:
return self.sources_id_info
return self.sources_id_info.values()
def all_names(self, detailed=False):
if detailed:
return self.sources_base_info
return self.sources_base_info.keys()
def all_urls(self, detailed=False):
if detailed:
return self.sources_base_info
return self.sources_base_info.values()
def search(self, name):
matches = []
if not self.sources:
self.information()
for source in self.sources:
if name.lower() in source['name'].lower():
matches.append(source)
if not matches:
return "No match found!"
return matches
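# --- Hedged usage sketch (editor addition) ---
# Typical use of the Sources wrapper defined above; the API key is a
# placeholder and must be replaced with a real newsapi.org key.
#
#   sources = Sources('YOUR_API_KEY')
#   tech = sources.get_by_category('technology')    # raw /v1/sources response
#   names = sources.information().all_names()       # list of source names
#   matches = sources.search('bbc')                  # sources whose name contains 'bbc'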
| [
"[email protected]"
]
| |
c8742c451b8f04a368affdf326423791bc8e12b1 | 92237641f61e9b35ff6af6294153a75074757bec | /Algorithm/programmers/lv1_약수의 합.py | efcd4bdb217f84c5119f9de34b91c09be9bd7a7f | []
| no_license | taepd/study | 8ded115765c4f804813e255d9272b727bf41ec80 | 846d3f2a5a4100225b750f00f992a640e9287d9c | refs/heads/master | 2023-03-08T13:56:57.366577 | 2022-05-08T15:24:35 | 2022-05-08T15:24:35 | 245,838,600 | 0 | 1 | null | 2023-03-05T23:54:41 | 2020-03-08T15:25:15 | JavaScript | UTF-8 | Python | false | false | 513 | py | """
Problem description
Given an integer n, complete the function solution so that it returns the sum of all divisors of n.
Constraints
n is an integer between 0 and 3000 inclusive.
Example inputs and outputs
n   return
12  28
5   6
Explanation of the examples
Example #1
The divisors of 12 are 1, 2, 3, 4, 6 and 12; adding them all gives 28.
Example #2
The divisors of 5 are 1 and 5; adding them gives 6.
"""
def solution(n):
return sum([i for i in range(1, n//2+1) if n % i == 0]) + n | [
"[email protected]"
]
| |
14f52a34bc9255f5bb49c7dae494fd7834405746 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/ancprange_5c7b16a33b225110a96fb24def386cf1.py | c5c1d643faab09f0a08e0e592081cf841b68970c | []
| no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123,256 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class AncpRange(Base):
"""ANCP Range
The AncpRange class encapsulates a list of ancpRange resources that are managed by the user.
A list of resources can be retrieved from the server using the AncpRange.find() method.
The list can be managed by using the AncpRange.add() and AncpRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'ancpRange'
_SDM_ATT_MAP = {
'AccessAggregationCbTlv': 'accessAggregationCbTlv',
'AtmVci': 'atmVci',
'AtmVpi': 'atmVpi',
'CircuitId': 'circuitId',
'DistributionAlgorithmPercent': 'distributionAlgorithmPercent',
'EnableAccessAggregation': 'enableAccessAggregation',
'Enabled': 'enabled',
'InnerVlanId': 'innerVlanId',
'Name': 'name',
'NasAncpServicePort': 'nasAncpServicePort',
'NasIPAddressIncr': 'nasIPAddressIncr',
'NasIpAddress': 'nasIpAddress',
'NasIpAddressIncr': 'nasIpAddressIncr',
'NasKeepAliveRetries': 'nasKeepAliveRetries',
'NasKeepAliveTimeout': 'nasKeepAliveTimeout',
'ObjectId': 'objectId',
'OuterVlanId': 'outerVlanId',
'UseDslInnerVlan': 'useDslInnerVlan',
'UseDslOuterVlan': 'useDslOuterVlan',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(AncpRange, self).__init__(parent, list_op)
@property
def AncpAtmRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpatmrange_143d9caa5302cd12e3b869aef31fa963.AncpAtmRange): An instance of the AncpAtmRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpatmrange_143d9caa5302cd12e3b869aef31fa963 import AncpAtmRange
if self._properties.get('AncpAtmRange', None) is not None:
return self._properties.get('AncpAtmRange')
else:
return AncpAtmRange(self)._select()
@property
def AncpIpRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpiprange_839eec134c905181ca26fbb79aa00c6a.AncpIpRange): An instance of the AncpIpRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpiprange_839eec134c905181ca26fbb79aa00c6a import AncpIpRange
if self._properties.get('AncpIpRange', None) is not None:
return self._properties.get('AncpIpRange')
else:
return AncpIpRange(self)._select()
@property
def AncpMacRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpmacrange_d7adfce512ae33443d921aa75806d22c.AncpMacRange): An instance of the AncpMacRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpmacrange_d7adfce512ae33443d921aa75806d22c import AncpMacRange
if self._properties.get('AncpMacRange', None) is not None:
return self._properties.get('AncpMacRange')
else:
return AncpMacRange(self)._select()
@property
def AncpPvcRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancppvcrange_2439e131bb1475c2b8a3037752391f88.AncpPvcRange): An instance of the AncpPvcRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancppvcrange_2439e131bb1475c2b8a3037752391f88 import AncpPvcRange
if self._properties.get('AncpPvcRange', None) is not None:
return self._properties.get('AncpPvcRange')
else:
return AncpPvcRange(self)._select()
@property
def AncpVlanRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpvlanrange_7b49b7726b3a28e72b7a61873dd18f8b.AncpVlanRange): An instance of the AncpVlanRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpvlanrange_7b49b7726b3a28e72b7a61873dd18f8b import AncpVlanRange
if self._properties.get('AncpVlanRange', None) is not None:
return self._properties.get('AncpVlanRange')
else:
return AncpVlanRange(self)._select()
@property
def DslProfileAllocationTable(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dslprofileallocationtable_0f362d03bbc7a8b4a0c98fc2d749d061.DslProfileAllocationTable): An instance of the DslProfileAllocationTable class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dslprofileallocationtable_0f362d03bbc7a8b4a0c98fc2d749d061 import DslProfileAllocationTable
if self._properties.get('DslProfileAllocationTable', None) is not None:
return self._properties.get('DslProfileAllocationTable')
else:
return DslProfileAllocationTable(self)
@property
def DslResyncProfileAllocationTable(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dslresyncprofileallocationtable_e84fe32c361c2c736b5bd2f1a6d3c33f.DslResyncProfileAllocationTable): An instance of the DslResyncProfileAllocationTable class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dslresyncprofileallocationtable_e84fe32c361c2c736b5bd2f1a6d3c33f import DslResyncProfileAllocationTable
if self._properties.get('DslResyncProfileAllocationTable', None) is not None:
return self._properties.get('DslResyncProfileAllocationTable')
else:
return DslResyncProfileAllocationTable(self)
@property
def AccessAggregationCbTlv(self):
# type: () -> str
"""
Returns
-------
- str: Enable Access Aggregation Circuit ID Binary TLV
"""
return self._get_attribute(self._SDM_ATT_MAP['AccessAggregationCbTlv'])
@AccessAggregationCbTlv.setter
def AccessAggregationCbTlv(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['AccessAggregationCbTlv'], value)
@property
def AtmVci(self):
# type: () -> int
"""
Returns
-------
- number: Custom VPI.
"""
return self._get_attribute(self._SDM_ATT_MAP['AtmVci'])
@AtmVci.setter
def AtmVci(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['AtmVci'], value)
@property
def AtmVpi(self):
# type: () -> int
"""
Returns
-------
- number: Custom VCI.
"""
return self._get_attribute(self._SDM_ATT_MAP['AtmVpi'])
@AtmVpi.setter
def AtmVpi(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['AtmVpi'], value)
@property
def CircuitId(self):
# type: () -> str
"""
Returns
-------
- str: Circuit ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['CircuitId'])
@CircuitId.setter
def CircuitId(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['CircuitId'], value)
@property
def DistributionAlgorithmPercent(self):
# type: () -> int
"""
Returns
-------
- number: DSL_Subscriber-per-AN distribution scattering model.
"""
return self._get_attribute(self._SDM_ATT_MAP['DistributionAlgorithmPercent'])
@DistributionAlgorithmPercent.setter
def DistributionAlgorithmPercent(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['DistributionAlgorithmPercent'], value)
@property
def EnableAccessAggregation(self):
# type: () -> bool
"""
Returns
-------
- bool: Enable Access Aggregation Circuit Binary.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableAccessAggregation'])
@EnableAccessAggregation.setter
def EnableAccessAggregation(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableAccessAggregation'], value)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Disabled ranges won't be configured nor validated.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def InnerVlanId(self):
# type: () -> int
"""
Returns
-------
- number: Custom inner VLAN ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['InnerVlanId'])
@InnerVlanId.setter
def InnerVlanId(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['InnerVlanId'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of range
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NasAncpServicePort(self):
# type: () -> int
"""
Returns
-------
- number: NAS Ancp Service Port.
"""
return self._get_attribute(self._SDM_ATT_MAP['NasAncpServicePort'])
@NasAncpServicePort.setter
def NasAncpServicePort(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NasAncpServicePort'], value)
@property
def NasIPAddressIncr(self):
# type: () -> str
"""DEPRECATED
Returns
-------
- str: NAS IP Increment.
"""
return self._get_attribute(self._SDM_ATT_MAP['NasIPAddressIncr'])
@NasIPAddressIncr.setter
def NasIPAddressIncr(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['NasIPAddressIncr'], value)
@property
def NasIpAddress(self):
# type: () -> str
"""
Returns
-------
- str: NAS IP.
"""
return self._get_attribute(self._SDM_ATT_MAP['NasIpAddress'])
@NasIpAddress.setter
def NasIpAddress(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['NasIpAddress'], value)
@property
def NasIpAddressIncr(self):
# type: () -> str
"""
Returns
-------
- str: NAS IP Increment.
"""
return self._get_attribute(self._SDM_ATT_MAP['NasIpAddressIncr'])
@NasIpAddressIncr.setter
def NasIpAddressIncr(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['NasIpAddressIncr'], value)
@property
def NasKeepAliveRetries(self):
# type: () -> int
"""
Returns
-------
- number: NAS Keep Alive Retries.
"""
return self._get_attribute(self._SDM_ATT_MAP['NasKeepAliveRetries'])
@NasKeepAliveRetries.setter
def NasKeepAliveRetries(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NasKeepAliveRetries'], value)
@property
def NasKeepAliveTimeout(self):
# type: () -> int
"""
Returns
-------
- number: NAS Keep Alive Timeout, in seconds
"""
return self._get_attribute(self._SDM_ATT_MAP['NasKeepAliveTimeout'])
@NasKeepAliveTimeout.setter
def NasKeepAliveTimeout(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NasKeepAliveTimeout'], value)
@property
def ObjectId(self):
# type: () -> str
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
@property
def OuterVlanId(self):
# type: () -> int
"""
Returns
-------
- number: Custom outer VLAN ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['OuterVlanId'])
@OuterVlanId.setter
def OuterVlanId(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['OuterVlanId'], value)
@property
def UseDslInnerVlan(self):
# type: () -> bool
"""
Returns
-------
- bool: Use DSL subscriber inner VLAN.
"""
return self._get_attribute(self._SDM_ATT_MAP['UseDslInnerVlan'])
@UseDslInnerVlan.setter
def UseDslInnerVlan(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['UseDslInnerVlan'], value)
@property
def UseDslOuterVlan(self):
# type: () -> bool
"""
Returns
-------
- bool: Use actual DSL outer VLAN.
"""
return self._get_attribute(self._SDM_ATT_MAP['UseDslOuterVlan'])
@UseDslOuterVlan.setter
def UseDslOuterVlan(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['UseDslOuterVlan'], value)
def update(self, AccessAggregationCbTlv=None, AtmVci=None, AtmVpi=None, CircuitId=None, DistributionAlgorithmPercent=None, EnableAccessAggregation=None, Enabled=None, InnerVlanId=None, Name=None, NasAncpServicePort=None, NasIPAddressIncr=None, NasIpAddress=None, NasIpAddressIncr=None, NasKeepAliveRetries=None, NasKeepAliveTimeout=None, OuterVlanId=None, UseDslInnerVlan=None, UseDslOuterVlan=None):
# type: (str, int, int, str, int, bool, bool, int, str, int, str, str, str, int, int, int, bool, bool) -> AncpRange
"""Updates ancpRange resource on the server.
Args
----
- AccessAggregationCbTlv (str): Enable Access Aggregation Circuit ID Binary TLV
- AtmVci (number): Custom VPI.
- AtmVpi (number): Custom VCI.
- CircuitId (str): Circuit ID.
- DistributionAlgorithmPercent (number): DSL_Subscriber-per-AN distribution scattering model.
- EnableAccessAggregation (bool): Enable Access Aggregation Circuit Binary.
- Enabled (bool): Disabled ranges won't be configured nor validated.
- InnerVlanId (number): Custom inner VLAN ID.
- Name (str): Name of range
- NasAncpServicePort (number): NAS Ancp Service Port.
- NasIPAddressIncr (str): NAS IP Increment.
- NasIpAddress (str): NAS IP.
- NasIpAddressIncr (str): NAS IP Increment.
- NasKeepAliveRetries (number): NAS Keep Alive Retries.
- NasKeepAliveTimeout (number): NAS Keep Alive Timeout, in seconds
- OuterVlanId (number): Custom outer VLAN ID.
- UseDslInnerVlan (bool): Use DSL subscriber inner VLAN.
- UseDslOuterVlan (bool): Use actual DSL outer VLAN.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, AccessAggregationCbTlv=None, AtmVci=None, AtmVpi=None, CircuitId=None, DistributionAlgorithmPercent=None, EnableAccessAggregation=None, Enabled=None, InnerVlanId=None, Name=None, NasAncpServicePort=None, NasIPAddressIncr=None, NasIpAddress=None, NasIpAddressIncr=None, NasKeepAliveRetries=None, NasKeepAliveTimeout=None, OuterVlanId=None, UseDslInnerVlan=None, UseDslOuterVlan=None):
# type: (str, int, int, str, int, bool, bool, int, str, int, str, str, str, int, int, int, bool, bool) -> AncpRange
"""Adds a new ancpRange resource on the server and adds it to the container.
Args
----
- AccessAggregationCbTlv (str): Enable Access Aggregation Circuit ID Binary TLV
- AtmVci (number): Custom VPI.
- AtmVpi (number): Custom VCI.
- CircuitId (str): Circuit ID.
- DistributionAlgorithmPercent (number): DSL_Subscriber-per-AN distribution scattering model.
- EnableAccessAggregation (bool): Enable Access Aggregation Circuit Binary.
- Enabled (bool): Disabled ranges won't be configured nor validated.
- InnerVlanId (number): Custom inner VLAN ID.
- Name (str): Name of range
- NasAncpServicePort (number): NAS Ancp Service Port.
- NasIPAddressIncr (str): NAS IP Increment.
- NasIpAddress (str): NAS IP.
- NasIpAddressIncr (str): NAS IP Increment.
- NasKeepAliveRetries (number): NAS Keep Alive Retries.
- NasKeepAliveTimeout (number): NAS Keep Alive Timeout, in seconds
- OuterVlanId (number): Custom outer VLAN ID.
- UseDslInnerVlan (bool): Use DSL subscriber inner VLAN.
- UseDslOuterVlan (bool): Use actual DSL outer VLAN.
Returns
-------
- self: This instance with all currently retrieved ancpRange resources using find and the newly added ancpRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained ancpRange resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, AccessAggregationCbTlv=None, AtmVci=None, AtmVpi=None, CircuitId=None, DistributionAlgorithmPercent=None, EnableAccessAggregation=None, Enabled=None, InnerVlanId=None, Name=None, NasAncpServicePort=None, NasIPAddressIncr=None, NasIpAddress=None, NasIpAddressIncr=None, NasKeepAliveRetries=None, NasKeepAliveTimeout=None, ObjectId=None, OuterVlanId=None, UseDslInnerVlan=None, UseDslOuterVlan=None):
# type: (str, int, int, str, int, bool, bool, int, str, int, str, str, str, int, int, str, int, bool, bool) -> AncpRange
"""Finds and retrieves ancpRange resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve ancpRange resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all ancpRange resources from the server.
Args
----
- AccessAggregationCbTlv (str): Enable Access Aggregation Circuit ID Binary TLV
- AtmVci (number): Custom VPI.
- AtmVpi (number): Custom VCI.
- CircuitId (str): Circuit ID.
- DistributionAlgorithmPercent (number): DSL_Subscriber-per-AN distribution scattering model.
- EnableAccessAggregation (bool): Enable Access Aggregation Circuit Binary.
- Enabled (bool): Disabled ranges won't be configured nor validated.
- InnerVlanId (number): Custom inner VLAN ID.
- Name (str): Name of range
- NasAncpServicePort (number): NAS Ancp Service Port.
- NasIPAddressIncr (str): NAS IP Increment.
- NasIpAddress (str): NAS IP.
- NasIpAddressIncr (str): NAS IP Increment.
- NasKeepAliveRetries (number): NAS Keep Alive Retries.
- NasKeepAliveTimeout (number): NAS Keep Alive Timeout, in seconds
- ObjectId (str): Unique identifier for this object
- OuterVlanId (number): Custom outer VLAN ID.
- UseDslInnerVlan (bool): Use DSL subscriber inner VLAN.
- UseDslOuterVlan (bool): Use actual DSL outer VLAN.
Returns
-------
- self: This instance with matching ancpRange resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of ancpRange data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the ancpRange resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
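    # Hedged usage sketch (added comment): typical retrieval pattern with the
    # accessors above; the parent object name is illustrative.
    #
    #   enabled_ranges = parent.AncpRange.find(Enabled=True)
    #   for ancp_range in enabled_ranges:
    #       print(ancp_range.Name, ancp_range.CircuitId)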
def AncpBringUpDslSubscribers(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpBringUpDslSubscribers operation on the server.
Bring up DSL subscribers
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
ancpBringUpDslSubscribers(async_operation=bool)
-----------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpBringUpDslSubscribers(Arg2=enum, async_operation=bool)
----------------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/p
rotocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpBringUpDslSubscribers', payload=payload, response_object=None)
def AncpStart(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpStart operation on the server.
Negotiate ANCP and PPPoE sessions for selected ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
ancpStart(async_operation=bool)
-------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpStart(Arg2=enum, async_operation=bool)
------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/p
rotocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpStart', payload=payload, response_object=None)
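# Usage sketch: every exec wrapper in this class builds its payload the same
# way. Arg1 is always the object itself (or its href), positional arguments
# map to Arg2, Arg3, ... in order, and keyword arguments pass through verbatim
# before being handed to self._execute(). The `ancp` handle below is a
# hypothetical, already-configured ANCP object, not something defined here:
#
#     ancp.AncpStart()                                 # no-argument form
#     ancp.AncpStart('sync', async_operation=True)     # Arg2 positional + keyword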
def AncpStartAdjacency(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpStartAdjacency operation on the server.
Start ANCP adjacency
The IxNetwork model allows for multiple method signatures with the same name, while Python does not.
ancpStartAdjacency(async_operation=bool)
----------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpStartAdjacency(Arg2=enum, async_operation=bool)
---------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/p
rotocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpStartAdjacency', payload=payload, response_object=None)
def AncpStartResync(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpStartResync operation on the server.
Start resync ANCP DSL line capabilities for selected ranges
The IxNetwork model allows for multiple method signatures with the same name, while Python does not.
ancpStartResync(Arg2=number, Arg3=number, async_operation=bool)
---------------------------------------------------------------
- Arg2 (number): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStac
k/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- Arg3 (number): Number of ReSync iterations performed by DSL Lines in selected ANCP ranges.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpStartResync(Arg2=number, Arg3=number, Arg4=enum, async_operation=bool)
--------------------------------------------------------------------------
- Arg2 (number): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStac
k/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- Arg3 (number): Number of ReSync iterations performed by DSL Lines in selected ANCP ranges.
- Arg4 (str(async | sync)): Time gap between ReSync iterations performed by DSL Lines in selected ANCP ranges [seconds].
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpStartResync', payload=payload, response_object=None)
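# Usage sketch for the resync form: Arg3 is the number of ReSync iterations
# and the optional Arg4 the gap between them. The object handle and the
# literal values below are placeholders for illustration only:
#
#     ancp_range.AncpStartResync(1, 5)                        # 5 resync iterations
#     ancp_range.AncpStartResync(1, 5, 'sync', async_operation=True)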
def AncpStop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpStop operation on the server.
Tear down ANCP and PPPoE sessions for selected ranges
The IxNetwork model allows for multiple method signatures with the same name, while Python does not.
ancpStop(async_operation=bool)
------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpStop(Arg2=enum, async_operation=bool)
-----------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/p
rotocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpStop', payload=payload, response_object=None)
def AncpStopAdjacency(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpStopAdjacency operation on the server.
Stop ANCP adjacency
The IxNetwork model allows for multiple method signatures with the same name, while Python does not.
ancpStopAdjacency(async_operation=bool)
---------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpStopAdjacency(Arg2=enum, async_operation=bool)
--------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/p
rotocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpStopAdjacency', payload=payload, response_object=None)
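# Taken together, the Ancp* execs above describe a start/stop lifecycle. A
# hypothetical ordering is shown below; the exact sequence required depends on
# the test configuration, which this sketch does not assume:
#
#     ancp.AncpStartAdjacency()
#     ancp.AncpStart()
#     # ... run traffic / checks ...
#     ancp.AncpStop()
#     ancp.AncpStopAdjacency()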
def AncpTeardownDslSubscribers(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpTeardownDslSubscribers operation on the server.
Tear down DSL subscribers
The IxNetwork model allows for multiple method signatures with the same name, while Python does not.
ancpTeardownDslSubscribers(async_operation=bool)
------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpTeardownDslSubscribers(Arg2=enum, async_operation=bool)
-----------------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/p
rotocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpTeardownDslSubscribers', payload=payload, response_object=None)
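# All of these exec calls can raise NotFoundError or ServerError (see the
# Raises sections above), so a defensive caller might wrap them. The import
# path shown is an assumption and should be checked against the installed
# package:
#
#     # from ixnetwork_restpy.errors import NotFoundError, ServerError
#     try:
#         ancp.AncpTeardownDslSubscribers()
#     except (NotFoundError, ServerError) as exc:
#         handle_failure(exc)   # hypothetical error handler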
def CustomProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2=list, Arg3=enum, async_operation=bool)
---------------------------------------------------------------
- Arg2 (list(str)): List of plugin types to be added in the new custom stack
- Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
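# CustomProtocolStack is the one exec here that takes a list argument: Arg2 is
# a list of plugin type names and Arg3 selects kAppend, kMerge or kOverwrite.
# The plugin names below are illustrative placeholders, not a validated stack:
#
#     protocol_stack.CustomProtocolStack(['ethernet', 'ip'], 'kAppend')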
def DisableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2=string, async_operation=bool)string
-------------------------------------------------------------
- Arg2 (str): Protocol class name to disable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the executed operation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2=string, async_operation=bool)string
------------------------------------------------------------
- Arg2 (str): Protocol class name to enable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the executed operation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
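# Unlike the Ancp* execs, DisableProtocolStack and EnableProtocolStack send
# self.href as Arg1 and return a status string. A hypothetical round trip,
# where the class name 'ipEndpoint' is only an example value:
#
#     status = protocol_stack.DisableProtocolStack('ipEndpoint')
#     print(status)
#     protocol_stack.EnableProtocolStack('ipEndpoint', async_operation=False)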
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Negotiate sessions for all protocols on all ranges belonging to selected plugins
The IxNetwork model allows for multiple method signatures with the same name, while Python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(Arg2=enum, async_operation=bool)
--------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm,/vport/protocolStack/atm/dhcpEndpoint,/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/dhcpServerEndpoint,/vport/protocolStack/atm/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampCon
trolRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/twampClient,/vport/protocolStack/atm/emulatedRouter/ip/twampServer,/vport/protocolStack/atm/emulatedRouter/ipEndpoint,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/atm/emulatedRouterEndpoint,/vport/protocolStack/atm/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/atm/ip,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/prot
ocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tpEndpoint,/vport/protocolStack/atm/ip/l2tpEndpoint/range,/vport/protocolStack/atm/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/smDnsEndpoint,/vport/protocolStack/atm/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/twampClient,/vport/protocolStack/atm/ip/twampServer,/vport/protocolStack/atm/ipEndpoint,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/amtRange,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/ipEndpoint/twampClient,/vport/protocolStack/atm/ipEndpoint/twampServer,/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet,/vport/protocolStack/ethernet/dcbxEndpoint,/vport/protocolStack/ethernet/dcbxEndpoint/range,/vport/protocolSta
ck/ethernet/dhcpEndpoint,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpServerEndpoint,/vport/protocolStack/ethernet/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/pr
otocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ip/twampServer,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/ethernet/emulatedRouterEndpoint,/vport/protocolStack/ethernet/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/ethernet/esmc,/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFdiscRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange,/vport/protocolStack/ethernet/fcoeFwdEndpoint,/vport/protocolStack/ethernet/fcoeFwdEndpoint/range,/vport/protocolStack/ethernet/fcoeFwdEndpoint/secondaryRange,/vport/protocolStack/ethernet/ip,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethern
et/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/twampClient,/vport/protocolStack/ethernet/ip/twampServer,/vport/protocolStack/ethernet/ipEndpoint,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ipEndpoint/twampClient,/vport/protocolStack/ethernet/ipEndpoint/twampServer,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoP
ppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/vepaEndpoint,/vport/protocolStack/ethernet/vepaEndpoint/range,/vport/protocolStack/ethernetEndpoint,/vport/protocolStack/ethernetEndpoint/esmc,/vport/protocolStack/fcClientEndpoint,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range/fcClientFdiscRange,/vport/protocolStack/fcClientEndpoint/range/fcClientFlogiRange,/vport/protocolStack/fcFportFwdEndpoint,/vport/protocolStack/fcFportFwdEndpoint/range,/vport/protocolStack/fcFportFwdEndpoint/secondaryRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Teardown sessions for all protocols on all ranges belonging to selected plugins
The IxNetwork model allows for multiple method signatures with the same name, while Python does not.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(Arg2=enum, async_operation=bool)
-------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm,/vport/protocolStack/atm/dhcpEndpoint,/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/dhcpServerEndpoint,/vport/protocolStack/atm/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampCon
trolRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/twampClient,/vport/protocolStack/atm/emulatedRouter/ip/twampServer,/vport/protocolStack/atm/emulatedRouter/ipEndpoint,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/atm/emulatedRouterEndpoint,/vport/protocolStack/atm/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/atm/ip,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/prot
ocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tpEndpoint,/vport/protocolStack/atm/ip/l2tpEndpoint/range,/vport/protocolStack/atm/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/smDnsEndpoint,/vport/protocolStack/atm/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/twampClient,/vport/protocolStack/atm/ip/twampServer,/vport/protocolStack/atm/ipEndpoint,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/amtRange,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/ipEndpoint/twampClient,/vport/protocolStack/atm/ipEndpoint/twampServer,/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet,/vport/protocolStack/ethernet/dcbxEndpoint,/vport/protocolStack/ethernet/dcbxEndpoint/range,/vport/protocolSta
ck/ethernet/dhcpEndpoint,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpServerEndpoint,/vport/protocolStack/ethernet/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/pr
otocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ip/twampServer,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/ethernet/emulatedRouterEndpoint,/vport/protocolStack/ethernet/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/ethernet/esmc,/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFdiscRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange,/vport/protocolStack/ethernet/fcoeFwdEndpoint,/vport/protocolStack/ethernet/fcoeFwdEndpoint/range,/vport/protocolStack/ethernet/fcoeFwdEndpoint/secondaryRange,/vport/protocolStack/ethernet/ip,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethern
et/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/twampClient,/vport/protocolStack/ethernet/ip/twampServer,/vport/protocolStack/ethernet/ipEndpoint,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ipEndpoint/twampClient,/vport/protocolStack/ethernet/ipEndpoint/twampServer,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoP
ppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/vepaEndpoint,/vport/protocolStack/ethernet/vepaEndpoint/range,/vport/protocolStack/ethernetEndpoint,/vport/protocolStack/ethernetEndpoint/esmc,/vport/protocolStack/fcClientEndpoint,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range/fcClientFdiscRange,/vport/protocolStack/fcClientEndpoint/range/fcClientFlogiRange,/vport/protocolStack/fcFportFwdEndpoint,/vport/protocolStack/fcFportFwdEndpoint/range,/vport/protocolStack/fcFportFwdEndpoint/secondaryRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
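# Rough usage sketch for the Start/Stop operations documented above. The way the
# range object is obtained below is hypothetical and depends on the surrounding
# session/protocol-stack setup; only the Start()/Stop() calls and the
# async_operation argument come from the docstrings in this class.
#
#   ranges = some_protocol_stack_endpoint.Range.find()   # hypothetical navigation
#   ranges.Start()                      # negotiate sessions on the selected ranges
#   ranges.Stop(async_operation=False)  # tear the same sessions back down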
| [
"[email protected]"
]
| |
79a1d1b99544c0df6ff3fa556be040c933b22cd8 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /4AtqpqKdXAFofa566_16.py | ea18541665d34bf4ba2b25fbe302826b6e09da0e | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | """
Create a function that takes in a _number as a string_ `n` and returns the
number **without trailing and leading zeros**.
* **Trailing Zeros** are the zeros _after_ a decimal point which _don't affect the value_ (e.g. the _last three_ zeros in `3.4000` and `3.04000`).
* **Leading Zeros** are the zeros _before_ a whole number which _don't affect the value_ (e.g. the _first three_ zeros in `000234` and `000230`).
### Examples
remove_leading_trailing("230.000") ➞ "230"
remove_leading_trailing("00402") ➞ "402"
remove_leading_trailing("03.1400") ➞ "3.14"
remove_leading_trailing("30") ➞ "30"
### Notes
* Return a **string**.
* If you get a number with `.0` on the end, return the _integer value_ (e.g. return `"4"` rather than `"4.0"`).
* If the number is `0`, `0.0`, `000`, `00.00`, etc... return `"0"`.
"""
def remove_leading_trailing(n):
    # Converting through float strips the leading zeros and any trailing
    # fractional zeros; whole values are returned as integer strings so that
    # no ".0" is left behind.
    f = float(n)
    i = int(f)
    if f == float(i):
        return str(i)
    return str(f)
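# Spot checks using the examples from the docstring above:
assert remove_leading_trailing("230.000") == "230"
assert remove_leading_trailing("00402") == "402"
assert remove_leading_trailing("03.1400") == "3.14"
assert remove_leading_trailing("30") == "30"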
| [
"[email protected]"
]
| |
f36ffffea1f4374ec233376ec27a22b0aaeb5bf5 | 43c24c890221d6c98e4a45cd63dba4f1aa859f55 | /test/tests/thread_contention_test.py | 518e5dcd40cd8122d86907338a77f8d5d156ebea | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | jmgc/pyston | c8e4df03c33c6b81d20b7d51a781d9e10148238e | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | refs/heads/master | 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 | NOASSERTION | 2020-09-11T14:38:39 | 2015-07-17T08:09:31 | Python | UTF-8 | Python | false | false | 416 | py | from thread import start_new_thread
import time

work = []
done = []

def run(num):
    # Each iteration pops one counter value and pushes it back decremented by one.
    for i in xrange(num):
        t = work.pop()
        work.append(t - 1)
    done.append(num)

print "starting!"

nthreads = 2
N = 100000
for i in xrange(nthreads):
    work.append(N)
for i in xrange(nthreads):
    t = start_new_thread(run, (N,))

while len(done) < nthreads:
    time.sleep(0)

# print work
# Every completed iteration lowers the combined total by exactly one, so after
# nthreads * N iterations the sum must be zero unless a list operation was lost
# to a race between the threads.
assert sum(work) == 0
| [
"[email protected]"
]
| |
37e9be961201df01808e80969b39de4b082dcbb1 | c4b87e23c0ca100fe42a3f63ddf5ae0f914b08d6 | /utils/feedparser.py | 92419bc4d0729ac2e923e9876c8d333470a48f7d | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
]
| permissive | TinLe/NewsBlur | 4cfc14952b03ff207b00ca85cf59a804a53d0179 | 881e558071bbd0048a65ed67d63d4366ee05cbe8 | refs/heads/master | 2023-05-27T02:35:16.183150 | 2022-11-22T16:09:05 | 2022-11-22T16:09:05 | 98,898,911 | 0 | 0 | MIT | 2023-05-22T21:41:15 | 2017-07-31T14:37:45 | Python | UTF-8 | Python | false | false | 160,115 | py | """Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.2.1"
__license__ = """
Copyright 2010-2015 Kurt McKee <[email protected]>
Copyright 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>",
"Bernd Schlapsi <https://github.com/brot>",]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
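# For example, an application embedding this module would typically override these
# module-level settings right after importing it (the name and URL below are just
# placeholders):
#
#   import feedparser
#   feedparser.USER_AGENT = "MyAggregator/1.0 +http://example.com/aggregator"
#   feedparser.RESOLVE_RELATIVE_URIS = 1
#   feedparser.SANITIZE_HTML = 1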
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
try:
# Python 3.1 introduces bytes.maketrans and simultaneously
# deprecates string.maketrans; use bytes.maketrans if possible
_maketrans = bytes.maketrans
except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is str:
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same type
raise NameError
except NameError:
# Python 2
def _s2bytes(s):
return s
def _l2bytes(l):
return ''.join(map(chr, l))
else:
# Python 3
def _s2bytes(s):
return bytes(s, 'utf8')
def _l2bytes(l):
return bytes(l)
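# For instance, _s2bytes('<?xml') gives b'<?xml' under Python 3 and the plain str
# '<?xml' under Python 2, while _l2bytes([0x1f, 0x8b]) builds the two-byte gzip
# signature in whichever bytes-like type the running interpreter uses.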
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import itertools
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
from xml.sax.saxutils import escape as _xmlescape
except ImportError:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&amp;')
data = data.replace('>', '&gt;')
data = data.replace('<', '&lt;')
for char, entity in entities:
data = data.replace(char, entity)
return data
else:
try:
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
except xml.sax.SAXReaderNotAvailable:
_XML_AVAILABLE = 0
else:
_XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing and content santizing
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
# Returning a new object in the calling thread's context
# resolves a thread-safety issue.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
try:
import iconv_codec
except ImportError:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
except ImportError:
chardet = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
SUPPORTED_VERSIONS = {'': u'unknown',
'rss090': u'RSS 0.90',
'rss091n': u'RSS 0.91 (Netscape)',
'rss091u': u'RSS 0.91 (Userland)',
'rss092': u'RSS 0.92',
'rss093': u'RSS 0.93',
'rss094': u'RSS 0.94',
'rss20': u'RSS 2.0',
'rss10': u'RSS 1.0',
'rss': u'RSS (unknown version)',
'atom01': u'Atom 0.1',
'atom02': u'Atom 0.2',
'atom03': u'Atom 0.3',
'atom10': u'Atom 1.0',
'atom': u'Atom (unknown version)',
'cdf': u'CDF',
}
class FeedParserDict(dict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
'''
:return: A :class:`FeedParserDict`.
'''
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError, "object doesn't have key 'category'"
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel']==u'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if not dict.__contains__(self, 'updated') and \
dict.__contains__(self, 'published'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.", DeprecationWarning)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if not dict.__contains__(self, 'updated_parsed') and \
dict.__contains__(self, 'published_parsed'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
'''
:return: A :class:`FeedParserDict`.
'''
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return value
return self[key]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError, "object has no attribute '%s'" % key
def __hash__(self):
return id(self)
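# A small self-contained illustration of the key aliasing implemented above (the
# values are made up for the example):
#
#   d = FeedParserDict()
#   d['channel'] = FeedParserDict(title=u'Example feed')  # stored under 'feed' via keymap
#   d['channel'] is d['feed']   # -> True
#   d.feed.title                # -> u'Example feed'; attribute access falls back to __getitem__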
_cp1252 = {
128: unichr(8364), # euro sign
130: unichr(8218), # single low-9 quotation mark
131: unichr( 402), # latin small letter f with hook
132: unichr(8222), # double low-9 quotation mark
133: unichr(8230), # horizontal ellipsis
134: unichr(8224), # dagger
135: unichr(8225), # double dagger
136: unichr( 710), # modifier letter circumflex accent
137: unichr(8240), # per mille sign
138: unichr( 352), # latin capital letter s with caron
139: unichr(8249), # single left-pointing angle quotation mark
140: unichr( 338), # latin capital ligature oe
142: unichr( 381), # latin capital letter z with caron
145: unichr(8216), # left single quotation mark
146: unichr(8217), # right single quotation mark
147: unichr(8220), # left double quotation mark
148: unichr(8221), # right double quotation mark
149: unichr(8226), # bullet
150: unichr(8211), # en dash
151: unichr(8212), # em dash
152: unichr( 732), # small tilde
153: unichr(8482), # trade mark sign
154: unichr( 353), # latin small letter s with caron
155: unichr(8250), # single right-pointing angle quotation mark
156: unichr( 339), # latin small ligature oe
158: unichr( 382), # latin small letter z with caron
159: unichr( 376), # latin capital letter y with diaeresis
}
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
if not isinstance(uri, unicode):
uri = uri.decode('utf-8', 'ignore')
try:
uri = urlparse.urljoin(base, uri)
except ValueError:
uri = u''
if not isinstance(uri, unicode):
return uri.decode('utf-8', 'ignore')
return uri
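# For instance, _urljoin(u'http://example.com/feeds/', u'../atom.xml') yields
# u'http://example.com/atom.xml'.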
class _FeedParserMixin:
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://podlove.org/simple-chapters': 'psc',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = [u'text/html', u'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
# georss
self.ingeometry = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or u''
self.lang = baselang or None
self.svgOK = 0
self.title_depth = -1
self.depth = 0
# psc_chapters_flag prevents multiple psc_chapters from being
# captured in a single entry or item. The transition states are
# None -> True -> False. psc_chapter elements will only be
# captured while it is True.
self.psc_chapters_flag = None
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
# A map of the following form:
# {
# object_that_value_is_set_on: {
# property_name: depth_of_node_property_was_extracted_from,
# other_property: depth_of_node_property_was_extracted_from,
# },
# }
self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
v = v.replace('&amp;', '&')
if not isinstance(v, unicode):
v = v.decode('utf-8')
return (k, v)
def unknown_starttag(self, tag, attrs):
# increment depth counter
self.depth += 1
# normalize attrs
attrs = map(self._normalize_attributes, attrs)
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if not isinstance(baseuri, unicode):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
# uses a whitelisted URI scheme (e.g. not `javascript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
if tag.find(':') <> -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
# No attributes so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
self.depth -= 1
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities:
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = unichr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == u'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
                # We have an incomplete declaration (no closing '>' yet).
return k
def mapContentType(self, contentType):
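        # normalize shorthand content type values ('text', 'plain', 'html',
        # 'xhtml') into full MIME types; anything else passes through unchanged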
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = u'text/plain'
elif contentType == 'html':
contentType = u'text/html'
elif contentType == 'xhtml':
contentType = u'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
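        # remember every declared namespace, and sniff the feed version from
        # well-known namespace URIs if it hasn't been determined yet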
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = u'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = u'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = u'atom10'
if loweruri.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = u'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or u'', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
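        # each elementstack entry is [element name, expectingText flag, list of
        # text pieces]; pop() later joins and post-processes the pieces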
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
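                # walk the pieces to verify that the opening <div> really
                # encloses all of the remaining content before stripping it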
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, unicode):
pieces[i] = v.decode('utf-8')
output = u''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
# do not resolve guid elements with isPermalink="false"
if not element == 'id' or self.guidislink:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = u'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
if self.encoding and not isinstance(output, unicode):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
# map win-1252 extensions to the proper code points
if isinstance(output, unicode):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category or _end_tags or _end_itunes_keywords
if element in ('category', 'tags', 'itunes_keywords'):
return output
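        # if a title was already captured at an equal or shallower depth,
        # ignore this one instead of overwriting it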
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
                    # converted from `?a=1&b=2` to `?a=1&amp;b;=2` as if they're
                    # unhandled character references. fix this special case.
                    output = output.replace('&amp;', '&')
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang:
self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',s)):
return
# all entities must have been defined as valid HTML entities
if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
return
return 1
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
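        # decide whether element content should be base64-decoded: an explicit
        # mode="base64" always wins, text/* and XML content types never are,
        # and any other content type is assumed to be base64-encoded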
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith(u'text/'):
return 0
if self.contentparams['type'].endswith(u'+xml'):
return 0
if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
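        # different feed formats use 'url', 'uri', or 'href' for the same
        # thing; collapse whichever one is present into a single 'href' key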
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': u'rss091u',
'0.92': u'rss092',
'0.93': u'rss093',
'0.94': u'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith(u'rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = u'rss20'
else:
self.version = u'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
if 'lastmod' in attrsD:
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if 'href' in attrsD:
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': u'atom01',
'0.2': u'atom02',
'0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = u'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.title_depth = -1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
# Append a new FeedParserDict when expecting an author
context = self._getContext()
context.setdefault('authors', [])
context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
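        # return the dictionary that parsed values should currently be written
        # into: source, image, textinput, the current entry, or the feed itself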
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
context.setdefault('authors', [FeedParserDict()])
context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
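        # keep the flat 'author' string and the structured '*_detail' dict
        # consistent: build "Name (email)" from the detail when available,
        # otherwise try to split an existing author string into a name and an
        # email address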
context = self._getContext()
detail = context.get('%ss' % key, [FeedParserDict()])[-1]
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, u'')
author = author.replace(u'()', u'')
author = author.replace(u'<>', u'')
            author = author.replace(u'&lt;&gt;', u'')
author = author.strip()
if author and (author[0] == u'('):
author = author[1:]
if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, detail)
if author:
detail['name'] = author
if email:
detail['email'] = email
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, u'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, u'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
self.psc_chapters_flag = None
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_dcterms_valid(self, attrsD):
self.push('validity', 1)
def _end_dcterms_valid(self):
for validity_detail in self.pop('validity').split(';'):
if '=' in validity_detail:
key, value = validity_detail.split('=', 1)
if key == 'start':
self._save('validity_start', value, overwrite=True)
self._save('validity_start_parsed', _parse_date(value), overwrite=True)
elif key == 'end':
self._save('validity_end', value, overwrite=True)
self._save('validity_end_parsed', _parse_date(value), overwrite=True)
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
_start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_dcterms_issued = _end_published
_end_issued = _end_published
_end_pubdate = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_dc_date = _start_updated
_start_lastbuilddate = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_dc_date = _end_updated
_end_lastbuilddate = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value), overwrite=True)
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
# geospatial location, or "where", from georss.org
def _start_georssgeom(self, attrsD):
self.push('geometry', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_point = _start_georssgeom
_start_georss_line = _start_georssgeom
_start_georss_polygon = _start_georssgeom
_start_georss_box = _start_georssgeom
def _save_where(self, geometry):
context = self._getContext()
context['where'].update(geometry)
def _end_georss_point(self):
geometry = _parse_georss_point(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_line(self):
geometry = _parse_georss_line(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_polygon(self):
this = self.pop('geometry')
geometry = _parse_georss_polygon(this)
if geometry:
self._save_where(geometry)
def _end_georss_box(self):
geometry = _parse_georss_box(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _start_where(self, attrsD):
self.push('where', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_where = _start_where
def _parse_srs_attrs(self, attrsD):
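        # GML geometries may declare a spatial reference system; record the
        # srsName and srsDimension (defaulting to 2 on missing or bad values)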
srsName = attrsD.get('srsname')
try:
srsDimension = int(attrsD.get('srsdimension', '2'))
except ValueError:
srsDimension = 2
context = self._getContext()
context['where']['srsName'] = srsName
context['where']['srsDimension'] = srsDimension
def _start_gml_point(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 1
self.push('geometry', 0)
def _start_gml_linestring(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 'linestring'
self.push('geometry', 0)
def _start_gml_polygon(self, attrsD):
self._parse_srs_attrs(attrsD)
self.push('geometry', 0)
def _start_gml_exterior(self, attrsD):
self.push('geometry', 0)
def _start_gml_linearring(self, attrsD):
self.ingeometry = 'polygon'
self.push('geometry', 0)
def _start_gml_pos(self, attrsD):
self.push('pos', 0)
def _end_gml_pos(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
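        # coordinates are swapped by default; if an EPSG code is given, only
        # swap for geographic coordinate systems (those listed in _geogCS)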
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_georss_point(this, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _start_gml_poslist(self, attrsD):
self.push('pos', 0)
def _end_gml_poslist(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_poslist(
this, self.ingeometry, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _end_geom(self):
self.ingeometry = 0
self.pop('geometry')
_end_gml_point = _end_geom
_end_gml_linestring = _end_geom
_end_gml_linearring = _end_geom
_end_gml_exterior = _end_geom
_end_gml_polygon = _end_geom
def _end_where(self):
self.pop('where')
_end_georss_where = _end_where
# end geospatial
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href'] = value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addTag(self, term, scheme, label):
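        # append a category/tag entry to the context's 'tags' list, skipping
        # empty values and exact duplicates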
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict(term=term, scheme=scheme, label=label)
if value not in tags:
tags.append(value)
def _start_tags(self, attrsD):
# This is a completely-made up element. Its semantics are determined
# only by a single feed that precipitated bug report 392 on Google Code.
# In short, this is junk code.
self.push('tags', 1)
def _end_tags(self):
for term in self.pop('tags').split(','):
self._addTag(term.strip(), None, None)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _start_media_category(self, attrsD):
attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._addTag(term.strip(), u'http://www.itunes.com/', None)
def _end_media_keywords(self):
for term in self.pop('media_keywords').split(','):
if term.strip():
self._addTag(term.strip(), None, None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
_end_media_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', u'alternate')
if attrsD['rel'] == u'self':
attrsD.setdefault('type', u'application/atom+xml')
else:
attrsD.setdefault('type', u'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrsD))
if 'href' in attrsD:
expectingText = 0
if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
def _end_link(self):
value = self.pop('link')
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
_start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
def _start_title(self, attrsD):
if self.svgOK:
return self.unknown_starttag('title', attrsD.items())
self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK:
return
value = self.popContent('title')
if not value:
return
self.title_depth = self.depth
_end_dc_title = _end_title
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
def _start_description(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
_start_media_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
_end_media_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, u'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = u'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD[u'url']
self.push('source', 1)
self.insource = 1
self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, u'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, u'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
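        # plain text and HTML content also doubles as the entry summary, but
        # only if no summary has been set yet (_save uses setdefault)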
copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
elif attrsD.get('url'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
# Convert 'yes' -> True, 'clean' to False, and any other value to None
# False and None both evaluate as False, so the difference can be ignored
# by applications that only need to know if the content is explicit.
self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
def _start_media_group(self, attrsD):
# don't do anything, but don't break the enclosed tags either
pass
def _start_media_rating(self, attrsD):
context = self._getContext()
context.setdefault('media_rating', attrsD)
self.push('rating', 1)
def _end_media_rating(self):
rating = self.pop('rating')
if rating is not None and rating.strip():
context = self._getContext()
context['media_rating']['content'] = rating
def _start_media_credit(self, attrsD):
context = self._getContext()
context.setdefault('media_credit', [])
context['media_credit'].append(attrsD)
self.push('credit', 1)
def _end_media_credit(self):
credit = self.pop('credit')
if credit != None and len(credit.strip()) != 0:
context = self._getContext()
context['media_credit'][-1]['content'] = credit
def _start_media_restriction(self, attrsD):
context = self._getContext()
context.setdefault('media_restriction', attrsD)
self.push('restriction', 1)
def _end_media_restriction(self):
restriction = self.pop('restriction')
if restriction != None and len(restriction.strip()) != 0:
context = self._getContext()
context['media_restriction']['content'] = [cc.strip().lower() for cc in restriction.split(' ')]
def _start_media_license(self, attrsD):
context = self._getContext()
context.setdefault('media_license', attrsD)
self.push('license', 1)
def _end_media_license(self):
license = self.pop('license')
if license != None and len(license.strip()) != 0:
context = self._getContext()
context['media_license']['content'] = license
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
if url != None and len(url.strip()) != 0:
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
def _start_newlocation(self, attrsD):
self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._getContext()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
def _start_psc_chapters(self, attrsD):
if self.psc_chapters_flag is None:
# Transition from None -> True
self.psc_chapters_flag = True
attrsD['chapters'] = []
self._getContext()['psc_chapters'] = FeedParserDict(attrsD)
def _end_psc_chapters(self):
# Transition from True -> False
self.psc_chapters_flag = False
def _start_psc_chapter(self, attrsD):
if self.psc_chapters_flag:
start = self._getAttribute(attrsD, 'start')
attrsD['start_parsed'] = _parse_psc_chapter_start(start)
context = self._getContext()['psc_chapters']
context['chapters'].append(FeedParserDict(attrsD))
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
self.decls = {}
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.trackNamespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = u'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
localname = str(localname).lower()
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
'source', 'track', 'wbr'
])
def __init__(self, encoding, _type):
self.encoding = encoding
self._type = _type
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
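        # expand XML-style self-closing tags: void HTML elements keep the
        # '<tag />' form, everything else gets an explicit closing tag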
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
# By declaring these methods and overriding their compiled code
# with the code from sgmllib, the original code will execute in
# feedparser's scope instead of sgmllib's. This means that the
# `tagfind` and `charref` regular expressions will be found as
# they're declared above, not as they're declared in sgmllib.
def goahead(self, i):
pass
goahead.func_code = sgmllib.SGMLParser.goahead.func_code
def __parse_starttag(self, i):
pass
__parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
def parse_starttag(self,i):
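        # in XHTML mode a trailing '/>' also closes the element, so synthesize
        # the matching end-tag event that sgmllib would otherwise not emit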
j = self.__parse_starttag(i)
if self._type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
try:
bytes
if bytes is str:
raise NameError
self.encoding = self.encoding + u'_INVALID_PYTHON_3'
except NameError:
if self.encoding and isinstance(data, unicode):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs:
return attrs
# utility method to be called by descendants
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if not isinstance(value, unicode):
value = value.decode(self.encoding, 'ignore')
try:
# Currently, in Python 3 the key is already a str, and cannot be decoded again
uattrs.append((unicode(key, self.encoding), value))
except TypeError:
uattrs.append((key, value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs = strattrs.encode(self.encoding)
except (UnicodeEncodeError, LookupError):
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%s%s />' % (tag, strattrs))
else:
self.pieces.append('<%s%s>' % (tag, strattrs))
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%s>" % tag)
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
ref = ref.lower()
if ref.startswith('x'):
value = int(ref[1:], 16)
else:
value = int(ref)
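        # remap character references in the Windows-1252 extension range to
        # their real Unicode code points before re-emitting the reference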
if value in _cp1252:
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%s;' % ref)
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
if ref in name2codepoint or ref == 'apos':
self.pieces.append('&%s;' % ref)
else:
            self.pieces.append('&amp;%s' % ref)
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%s-->' % text)
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%s>' % text)
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%s>' % text)
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
def parse_declaration(self, i):
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
# escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', u'xml').endswith(u'xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
            data = data.replace('&#x2f;', '/')
            data = data.replace('&#x2F;', '/')
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = set([('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('audio', 'src'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src'),
('source', 'src'),
('video', 'poster'),
('video', 'src')])
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _RelativeURIResolver(baseURI, encoding, _type)
p.feed(htmlSource)
return p.output()
def _makeSafeAbsoluteURI(base, rel=None):
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
return _urljoin(base, rel or u'')
if not base:
return rel or u''
if not rel:
try:
scheme = urlparse.urlparse(base)[0]
except ValueError:
return u''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return u''
uri = _urljoin(base, rel)
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return u''
return uri
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video',
'object', 'embed', 'iframe', 'param'])
acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel',
'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing',
'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span',
'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang',
'allowfullscreen'])
unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
acceptable_css_properties = set(['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width'])
# survey of common keywords found in feeds
acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow'])
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = set([
'annotation',
'annotation-xml',
'maction',
'maligngroup',
'malignmark',
'math',
'menclose',
'merror',
'mfenced',
'mfrac',
'mglyph',
'mi',
'mlabeledtr',
'mlongdiv',
'mmultiscripts',
'mn',
'mo',
'mover',
'mpadded',
'mphantom',
'mprescripts',
'mroot',
'mrow',
'ms',
'mscarries',
'mscarry',
'msgroup',
'msline',
'mspace',
'msqrt',
'msrow',
'mstack',
'mstyle',
'msub',
'msubsup',
'msup',
'mtable',
'mtd',
'mtext',
'mtr',
'munder',
'munderover',
'none',
'semantics',
])
mathml_attributes = set([
'accent',
'accentunder',
'actiontype',
'align',
'alignmentscope',
'altimg',
'altimg-height',
'altimg-valign',
'altimg-width',
'alttext',
'bevelled',
'charalign',
'close',
'columnalign',
'columnlines',
'columnspacing',
'columnspan',
'columnwidth',
'crossout',
'decimalpoint',
'denomalign',
'depth',
'dir',
'display',
'displaystyle',
'edge',
'encoding',
'equalcolumns',
'equalrows',
'fence',
'fontstyle',
'fontweight',
'form',
'frame',
'framespacing',
'groupalign',
'height',
'href',
'id',
'indentalign',
'indentalignfirst',
'indentalignlast',
'indentshift',
'indentshiftfirst',
'indentshiftlast',
'indenttarget',
'infixlinebreakstyle',
'largeop',
'length',
'linebreak',
'linebreakmultchar',
'linebreakstyle',
'lineleading',
'linethickness',
'location',
'longdivstyle',
'lquote',
'lspace',
'mathbackground',
'mathcolor',
'mathsize',
'mathvariant',
'maxsize',
'minlabelspacing',
'minsize',
'movablelimits',
'notation',
'numalign',
'open',
'other',
'overflow',
'position',
'rowalign',
'rowlines',
'rowspacing',
'rowspan',
'rquote',
'rspace',
'scriptlevel',
'scriptminsize',
'scriptsizemultiplier',
'selection',
'separator',
'separators',
'shift',
'side',
'src',
'stackalign',
'stretchy',
'subscriptshift',
'superscriptshift',
'symmetric',
'voffset',
'width',
'xlink:href',
'xlink:show',
'xlink:type',
'xmlns',
'xmlns:xlink',
])
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use'])
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan'])
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity'])
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag=='svg':
attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
if tag=='math':
attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
        # choose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if filter(lambda (n,v): n.startswith('xlink:'),attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
# make sure the uri uses an acceptable uri scheme
if key == u'href':
value = _makeSafeAbsoluteURI(value)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
def parse_comment(self, i, report=1):
ret = _BaseHTMLProcessor.parse_comment(self, i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _HTMLSanitizer(encoding, _type)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
p.feed(htmlSource)
data = p.output()
data = data.strip().replace('\r\n', '\n')
return data
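# Illustrative sketch, not part of the original feedparser source: _sanitizeHTML() should
# drop elements and attributes that are not in the whitelists above (for example script
# blocks and inline event handlers) while keeping acceptable markup. The input string is
# made up; the function is defined but never called here.
def _example_sanitize_html():
    dirty = '<p onclick="alert(1)">hi <script>alert(2)</script><b>there</b></p>'
    return _sanitizeHTML(dirty, 'utf-8', u'text/html')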
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
# The default implementation just raises HTTPError.
# Forget that.
fp.status = code
return fp
def http_error_301(self, req, fp, code, msg, hdrs):
result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
code, msg, hdrs)
result.status = code
result.newurl = result.geturl()
return result
# The default implementations in urllib2.HTTPRedirectHandler
# are identical, so hardcoding a http_error_301 call above
# won't affect anything
http_error_300 = http_error_301
http_error_302 = http_error_301
http_error_303 = http_error_301
http_error_307 = http_error_301
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth.
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
if base64 is None or 'Authorization' not in req.headers \
or 'WWW-Authenticate' not in headers:
return self.http_error_default(req, fp, code, msg, headers)
auth = _base64decode(req.headers['Authorization'].split(' ')[1])
user, passw = auth.split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
if request_headers is supplied it is a dictionary of HTTP request headers
that will override the values generated by FeedParser.
:return: A :class:`StringIO.StringIO` or :class:`io.BytesIO`.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if isinstance(url_file_stream_or_string, basestring) \
and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
# Deal with the feed URI scheme
if url_file_stream_or_string.startswith('feed:http'):
url_file_stream_or_string = url_file_stream_or_string[5:]
elif url_file_stream_or_string.startswith('feed:'):
url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
if not agent:
agent = USER_AGENT
# Test for inline user:password credentials for HTTP basic auth
auth = None
if base64 and not url_file_stream_or_string.startswith('ftp:'):
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.standard_b64encode(user_passwd).strip()
# iri support
if isinstance(url_file_stream_or_string, unicode):
url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string, 'rb')
except (IOError, UnicodeEncodeError, TypeError):
# if url_file_stream_or_string is a unicode object that
# cannot be converted to the encoding returned by
# sys.getfilesystemencoding(), a UnicodeEncodeError
# will be thrown
# If url_file_stream_or_string is a string that contains NULL
# (such as an XML document encoded in UTF-32), TypeError will
# be thrown.
pass
# treat url_file_stream_or_string as string
if isinstance(url_file_stream_or_string, unicode):
return _StringIO(url_file_stream_or_string.encode('utf-8'))
return _StringIO(url_file_stream_or_string)
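# Illustrative sketch, not part of the original feedparser source: one plausible way to
# call _open_resource() as described in the docstring above. The feed URL is a made-up
# placeholder; etag and modified are None for a first fetch, and handlers/request_headers
# are left empty. The function is defined but never called here.
def _example_open_resource_usage():
    url = 'http://example.com/feed.xml'
    stream = _open_resource(url, None, None, USER_AGENT, None, [], {})
    try:
        return stream.read()
    finally:
        if hasattr(stream, 'close'):
            stream.close()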
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urlparse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = u''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urlparse.urlunsplit(parts)
else:
return url
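# Illustrative sketch, not part of the original feedparser source: _convert_to_idn()
# leaves an all-ASCII host untouched and re-encodes a non-ASCII host with the 'idna'
# codec (punycode). Both URLs below are made-up examples.
def _example_convert_to_idn():
    ascii_url = _convert_to_idn(u'http://example.com/feed')        # returned unchanged
    idn_url = _convert_to_idn(u'http://b\u00fccher.example/feed')  # host becomes xn--... punycode
    return ascii_url, idn_url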
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
request = urllib2.Request(url)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if isinstance(modified, basestring):
modified = _parse_date(modified)
elif isinstance(modified, datetime.datetime):
modified = modified.utctimetuple()
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
# use this for whatever -- cookies, special headers, etc
# [('Cookie','Something'),('x-special-header','Another Value')]
for header_name, header_value in request_headers.items():
request.add_header(header_name, header_value)
request.add_header('A-IM', 'feed') # RFC 3229 support
return request
def _parse_psc_chapter_start(start):
FORMAT = r'^((\d{2}):)?(\d{2}):(\d{2})(\.(\d{3}))?$'
m = re.compile(FORMAT).match(start)
if m is None:
return None
_, h, m, s, _, ms = m.groups()
h, m, s, ms = (int(h or 0), int(m), int(s), int(ms or 0))
return datetime.timedelta(0, h*60*60 + m*60 + s, ms*1000)
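# Illustrative sketch, not part of the original feedparser source: the chapter-start
# format above allows an optional hour field and optional milliseconds, so both sample
# strings below should parse into datetime.timedelta objects.
def _example_parse_psc_chapter_start():
    with_hours = _parse_psc_chapter_start('01:02:03.500')  # 1h 2m 3s 500ms
    minutes_only = _parse_psc_chapter_start('02:03')       # 2m 3s
    return with_hours, minutes_only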
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance, 030104 could be 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
try:
del tmpl
except NameError:
pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
del regex
except NameError:
pass
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m:
break
if not m:
return
if m.span() == (0, 0):
return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
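# Illustrative sketch, not part of the original feedparser source: a few of the ISO 8601
# shapes the templates above are meant to cover. Each call should return a 9-tuple, or
# None if nothing matches. The dates are made up.
def _example_parse_date_iso8601():
    full = _parse_date_iso8601('2003-12-31T10:14:55Z')  # date, time and zone designator
    date_only = _parse_date_iso8601('2003-12-31')        # date without a time part
    compact = _parse_date_iso8601('20031231')            # same date without separators
    return full, date_only, compact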
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m:
return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m or m.group(2) not in _hungarian_months:
return None
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
timezonenames = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
}
# W3 date and time format parser
# http://www.w3.org/TR/NOTE-datetime
# Also supports MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (basically, allow a space as a date/time/timezone separator)
def _parse_date_w3dtf(datestr):
if not datestr.strip():
return None
parts = datestr.lower().split('t')
if len(parts) == 1:
# This may be a date only, or may be an MSSQL-style date
parts = parts[0].split()
if len(parts) == 1:
# Treat this as a date only
parts.append('00:00:00z')
elif len(parts) > 2:
return None
date = parts[0].split('-', 2)
if not date or len(date[0]) != 4:
return None
# Ensure that `date` has 3 elements. Using '1' sets the default
# month to January and the default day to the 1st of the month.
date.extend(['1'] * (3 - len(date)))
try:
year, month, day = [int(i) for i in date]
except ValueError:
# `date` may have more than 3 elements or may contain
# non-integer strings.
return None
if parts[1].endswith('z'):
parts[1] = parts[1][:-1]
parts.append('z')
# Append the numeric timezone offset, if any, to parts.
# If this is an MSSQL-style date then parts[2] already contains
# the timezone information, so `append()` will not affect it.
# Add 1 to each value so that if `find()` returns -1 it will be
# treated as False.
loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1
loc = loc - 1
parts.append(parts[1][loc:])
parts[1] = parts[1][:loc]
time = parts[1].split(':', 2)
# Ensure that time has 3 elements. Using '0' means that the
# minutes and seconds, if missing, will default to 0.
time.extend(['0'] * (3 - len(time)))
tzhour = 0
tzmin = 0
if parts[2][:1] in ('-', '+'):
try:
tzhour = int(parts[2][1:3])
tzmin = int(parts[2][4:])
except ValueError:
return None
if parts[2].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[2], 0)
try:
hour, minute, second = [int(float(i)) for i in time]
except ValueError:
return None
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(date):
"""Parse RFC 822 dates and times
http://tools.ietf.org/html/rfc822#section-5
There are some formatting differences that are accounted for:
1. Years may be two or four digits.
2. The month and day can be swapped.
3. Additional timezone names are supported.
4. A default time and timezone are assumed if only a date is present.
"""
daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
months = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
}
parts = date.lower().split()
if len(parts) < 5:
# Assume that the time and timezone are missing
parts.extend(('00:00:00', '0000'))
# Remove the day name
if parts[0][:3] in daynames:
parts = parts[1:]
if len(parts) < 5:
# If there are still fewer than five parts, there's not enough
# information to interpret this
return None
try:
day = int(parts[0])
except ValueError:
# Check if the day and month are swapped
if months.get(parts[0][:3]):
try:
day = int(parts[1])
except ValueError:
return None
else:
parts[1] = parts[0]
else:
return None
month = months.get(parts[1][:3])
if not month:
return None
try:
year = int(parts[2])
except ValueError:
return None
# Normalize two-digit years:
# Anything in the 90's is interpreted as 1990 and on
# Anything 89 or less is interpreted as 2089 or before
if len(parts[2]) <= 2:
year += (1900, 2000)[year < 90]
timeparts = parts[3].split(':')
timeparts = timeparts + ([0] * (3 - len(timeparts)))
try:
(hour, minute, second) = map(int, timeparts)
except ValueError:
return None
tzhour = 0
tzmin = 0
# Strip 'Etc/' from the timezone
if parts[4].startswith('etc/'):
parts[4] = parts[4][4:]
# Normalize timezones that start with 'gmt':
# GMT-05:00 => -0500
# GMT => GMT
if parts[4].startswith('gmt'):
parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt'
# Handle timezones like '-0500', '+0500', and 'EST'
if parts[4] and parts[4][0] in ('-', '+'):
try:
tzhour = int(parts[4][1:3])
tzmin = int(parts[4][3:])
except ValueError:
return None
if parts[4].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[4], 0)
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
registerDateHandler(_parse_date_rfc822)
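# Illustrative sketch, not part of the original feedparser source: the RFC 822 variations
# listed in the docstring above, exercised on made-up dates. Each call should return a
# UTC 9-tuple or None.
def _example_parse_date_rfc822():
    standard = _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT')
    two_digit_year = _parse_date_rfc822('Thu, 01 Jan 04 19:48:21 GMT')
    date_only = _parse_date_rfc822('01 Jan 2004')  # time and timezone default to 00:00:00 GMT
    return standard, two_digit_year, date_only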
_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
def _parse_date_asctime(dt):
"""Parse asctime-style dates.
Converts asctime to RFC822-compatible dates and uses the RFC822 parser
to do the actual parsing.
Supported formats (format is standardized to the first one listed):
* {weekday name} {month name} dd hh:mm:ss {+-tz} yyyy
* {weekday name} {month name} dd hh:mm:ss yyyy
"""
parts = dt.split()
# Insert a GMT timezone, if needed.
if len(parts) == 5:
parts.insert(4, '+0000')
# Exit if there are not six parts.
if len(parts) != 6:
return None
# Reassemble the parts in an RFC822-compatible order and parse them.
return _parse_date_rfc822(' '.join([
parts[0], parts[2], parts[1], parts[5], parts[3], parts[4],
]))
registerDateHandler(_parse_date_asctime)
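# Illustrative sketch, not part of the original feedparser source: asctime-style dates
# with and without an explicit timezone, matching the two supported formats listed in
# the docstring above. The dates are made up.
def _example_parse_date_asctime():
    with_tz = _parse_date_asctime('Sun Jan 04 16:29:06 +0000 2004')
    without_tz = _parse_date_asctime('Sun Jan 04 16:29:06 2004')  # GMT is assumed
    return with_tz, without_tz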
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
m = _my_date_pattern.search(aDateString)
if m is None:
return None
dow, year, month, day, hour, minute, second, tz = m.groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
ZERO_BYTES = _l2bytes([0x00, 0x00])
# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')
# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
def convert_to_utf8(http_headers, data):
'''Detect and convert the character encoding to UTF-8.
http_headers is a dictionary
data is a raw string (not Unicode)'''
# This is so much trickier than it sounds, it's not even funny.
# According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
# is application/xml, application/*+xml,
# application/xml-external-parsed-entity, or application/xml-dtd,
# the encoding given in the charset parameter of the HTTP Content-Type
# takes precedence over the encoding given in the XML prefix within the
# document, and defaults to 'utf-8' if neither are specified. But, if
# the HTTP Content-Type is text/xml, text/*+xml, or
# text/xml-external-parsed-entity, the encoding given in the XML prefix
# within the document is ALWAYS IGNORED and only the encoding given in
# the charset parameter of the HTTP Content-Type header should be
# respected, and it defaults to 'us-ascii' if not specified.
# Furthermore, discussion on the atom-syntax mailing list with the
# author of RFC 3023 leads me to the conclusion that any document
# served with a Content-Type of text/* and no charset parameter
# must be treated as us-ascii. (We now do this.) And also that it
# must always be flagged as non-well-formed. (We now do this too.)
# If Content-Type is unspecified (input was local file or non-HTTP source)
# or unrecognized (server just got it totally wrong), then go by the
# encoding given in the XML prefix of the document and default to
# 'iso-8859-1' as per the HTTP specification (RFC 2616).
# Then, assuming we didn't find a character encoding in the HTTP headers
# (and the HTTP Content-type allowed us to look in the body), we need
# to sniff the first few bytes of the XML data and try to determine
# whether the encoding is ASCII-compatible. Section F of the XML
# specification shows the way here:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# If the sniffed encoding is not ASCII-compatible, we need to make it
# ASCII compatible so that we can sniff further into the XML declaration
# to find the encoding attribute, which will tell us the true encoding.
# Of course, none of this guarantees that we will be able to parse the
# feed in the declared character encoding (assuming it was declared
# correctly, which many are not). iconv_codec can help a lot;
# you should definitely install it if you can.
# http://cjkpython.i18n.org/
bom_encoding = u''
xml_encoding = u''
rfc3023_encoding = u''
# Look at the first few bytes of the document to guess what
# its encoding may be. We only need to decode enough of the
# document that we can use an ASCII-compatible regular
# expression to search for an XML encoding declaration.
# The heuristic follows the XML specification, section F:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# Check for BOMs first.
if data[:4] == codecs.BOM_UTF32_BE:
bom_encoding = u'utf-32be'
data = data[4:]
elif data[:4] == codecs.BOM_UTF32_LE:
bom_encoding = u'utf-32le'
data = data[4:]
elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
bom_encoding = u'utf-16be'
data = data[2:]
elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
bom_encoding = u'utf-16le'
data = data[2:]
elif data[:3] == codecs.BOM_UTF8:
bom_encoding = u'utf-8'
data = data[3:]
# Check for the characters '<?xm' in several encodings.
elif data[:4] == EBCDIC_MARKER:
bom_encoding = u'cp037'
elif data[:4] == UTF16BE_MARKER:
bom_encoding = u'utf-16be'
elif data[:4] == UTF16LE_MARKER:
bom_encoding = u'utf-16le'
elif data[:4] == UTF32BE_MARKER:
bom_encoding = u'utf-32be'
elif data[:4] == UTF32LE_MARKER:
bom_encoding = u'utf-32le'
tempdata = data
try:
if bom_encoding:
tempdata = data.decode(bom_encoding).encode('utf-8')
except (UnicodeDecodeError, LookupError):
# feedparser recognizes UTF-32 encodings that aren't
# available in Python 2.4 and 2.5, so it's possible to
# encounter a LookupError during decoding.
xml_encoding_match = None
else:
xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
# Normalize the xml_encoding if necessary.
if bom_encoding and (xml_encoding in (
u'u16', u'utf-16', u'utf16', u'utf_16',
u'u32', u'utf-32', u'utf32', u'utf_32',
u'iso-10646-ucs-2', u'iso-10646-ucs-4',
u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
)):
xml_encoding = bom_encoding
# Find the HTTP Content-Type and, hopefully, a character
# encoding provided by the server. The Content-Type is used
# to choose the "correct" encoding among the BOM encoding,
# XML declaration encoding, and HTTP encoding, following the
# heuristic defined in RFC 3023.
http_content_type = http_headers.get('content-type') or ''
http_content_type, params = cgi.parse_header(http_content_type)
http_encoding = params.get('charset', '').replace("'", "")
if not isinstance(http_encoding, unicode):
http_encoding = http_encoding.decode('utf-8', 'ignore')
acceptable_content_type = 0
application_content_types = (u'application/xml', u'application/xml-dtd',
u'application/xml-external-parsed-entity')
text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith(u'application/') and
http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith(u'text/') and
http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or u'us-ascii'
elif http_content_type.startswith(u'text/'):
rfc3023_encoding = http_encoding or u'us-ascii'
elif http_headers and 'content-type' not in http_headers:
rfc3023_encoding = xml_encoding or u'iso-8859-1'
else:
rfc3023_encoding = xml_encoding or u'utf-8'
# gb18030 is a superset of gb2312, so always replace gb2312
# with gb18030 for greater compatibility.
if rfc3023_encoding.lower() == u'gb2312':
rfc3023_encoding = u'gb18030'
if xml_encoding.lower() == u'gb2312':
xml_encoding = u'gb18030'
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
error = None
if http_headers and (not acceptable_content_type):
if 'content-type' in http_headers:
msg = '%s is not an XML media type' % http_headers['content-type']
else:
msg = 'no Content-type specified'
error = NonXMLContentType(msg)
# determine character encoding
known_encoding = 0
lazy_chardet_encoding = None
tried_encodings = []
if chardet:
def lazy_chardet_encoding():
chardet_encoding = chardet.detect(data)['encoding']
if not chardet_encoding:
chardet_encoding = ''
if not isinstance(chardet_encoding, unicode):
chardet_encoding = unicode(chardet_encoding, 'ascii', 'ignore')
return chardet_encoding
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
lazy_chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
if callable(proposed_encoding):
proposed_encoding = proposed_encoding()
if not proposed_encoding:
continue
if proposed_encoding in tried_encodings:
continue
tried_encodings.append(proposed_encoding)
try:
data = data.decode(proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = 1
# Update the encoding in the opening XML processing instruction.
new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
if RE_XML_DECLARATION.search(data):
data = RE_XML_DECLARATION.sub(new_declaration, data)
else:
data = new_declaration + u'\n' + data
data = data.encode('utf-8')
break
# if still no luck, give up
if not known_encoding:
error = CharacterEncodingUnknown(
'document encoding unknown, I tried ' +
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
(rfc3023_encoding, xml_encoding))
rfc3023_encoding = u''
elif proposed_encoding != rfc3023_encoding:
error = CharacterEncodingOverride(
'document declared as %s, but parsed as %s' %
(rfc3023_encoding, proposed_encoding))
rfc3023_encoding = proposed_encoding
return data, rfc3023_encoding, error
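# Illustrative sketch, not part of the original feedparser source: how an HTTP charset
# interacts with the XML declaration in convert_to_utf8(). The header and body below are
# made up; for an application/xml Content-Type the charset parameter takes precedence,
# so this document should be decoded as iso-8859-1 and re-encoded as utf-8.
def _example_convert_to_utf8():
    headers = {'content-type': 'application/xml; charset=iso-8859-1'}
    body = _s2bytes("<?xml version='1.0' encoding='utf-8'?><feed></feed>")
    data, used_encoding, error = convert_to_utf8(headers, body)
    return data, used_encoding, error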
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
def replace_doctype(data):
'''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document with a replaced DOCTYPE
'''
# Divide the document into two groups by finding the location
# of the first element that doesn't begin with '<?' or '<!'.
start = re.search(_s2bytes('<\w'), data)
start = start and start.start() or -1
head, data = data[:start+1], data[start+1:]
# Save and then remove all of the ENTITY declarations.
entity_results = RE_ENTITY_PATTERN.findall(head)
head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
# Find the DOCTYPE declaration and check the feed type.
doctype_results = RE_DOCTYPE_PATTERN.findall(head)
doctype = doctype_results and doctype_results[0] or _s2bytes('')
if _s2bytes('netscape') in doctype.lower():
version = u'rss091n'
else:
version = None
# Re-insert the safe ENTITY declarations if a DOCTYPE was found.
replacement = _s2bytes('')
if len(doctype_results) == 1 and entity_results:
match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
safe_entities = filter(match_safe_entities, entity_results)
if safe_entities:
replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
+ _s2bytes('>\n<!ENTITY ').join(safe_entities) \
+ _s2bytes('>\n]>')
data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
# Precompute the safe entities for the loose parser.
safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
return version, data, safe_entities
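# Illustrative sketch, not part of the original feedparser source: replace_doctype() on a
# made-up Netscape RSS 0.91 document. The DOCTYPE should be detected (version 'rss091n'),
# the declaration replaced, and the safe 'copyright' entity kept for the loose parser.
def _example_replace_doctype():
    doc = _s2bytes('<!DOCTYPE rss SYSTEM "http://my.netscape.com/publish/formats/rss-0.91.dtd" [\n'
                   '<!ENTITY copyright "(C)">\n'
                   ']>\n'
                   '<rss version="0.91"></rss>')
    version, data, entities = replace_doctype(doc)
    return version, data, entities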
# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.
def _parse_poslist(value, geom_type, swap=True, dims=2):
if geom_type == 'linestring':
return _parse_georss_line(value, swap, dims)
elif geom_type == 'polygon':
ring = _parse_georss_line(value, swap, dims)
return {'type': u'Polygon', 'coordinates': (ring['coordinates'],)}
else:
return None
def _gen_georss_coords(value, swap=True, dims=2):
# A generator of (lon, lat) pairs from a string of encoded GeoRSS
# coordinates. Converts to floats and swaps order.
latlons = itertools.imap(float, value.strip().replace(',', ' ').split())
nxt = latlons.next
while True:
t = [nxt(), nxt()][::swap and -1 or 1]
if dims == 3:
t.append(nxt())
yield tuple(t)
def _parse_georss_point(value, swap=True, dims=2):
# A point contains a single latitude-longitude pair, separated by
# whitespace. We'll also handle comma separators.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {u'type': u'Point', u'coordinates': coords[0]}
except (IndexError, ValueError):
return None
def _parse_georss_line(value, swap=True, dims=2):
# A line contains a space separated list of latitude-longitude pairs in
# WGS84 coordinate reference system, with each pair separated by
# whitespace. There must be at least two pairs.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {u'type': u'LineString', u'coordinates': coords}
except (IndexError, ValueError):
return None
def _parse_georss_polygon(value, swap=True, dims=2):
# A polygon contains a space separated list of latitude-longitude pairs,
# with each pair separated by whitespace. There must be at least four
# pairs, with the last being identical to the first (so a polygon has a
# minimum of three actual points).
try:
ring = list(_gen_georss_coords(value, swap, dims))
except (IndexError, ValueError):
return None
if len(ring) < 4:
return None
return {u'type': u'Polygon', u'coordinates': (ring,)}
def _parse_georss_box(value, swap=True, dims=2):
# A bounding box is a rectangular region, often used to define the extents
    # of a map or a rough area of interest. A box contains two space-separated
# latitude-longitude pairs, with each pair separated by whitespace. The
# first pair is the lower corner, the second is the upper corner.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {u'type': u'Box', u'coordinates': tuple(coords)}
except (IndexError, ValueError):
return None
# end geospatial parsers
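# Illustrative sketch, not part of the original feedparser source: the GeoRSS helpers
# above applied to small hand-made coordinate strings. Input order is "lat lon"; with the
# default swap=True the output coordinates come back as (lon, lat) pairs.
def _example_georss_parsers():
    point = _parse_georss_point('45.256 -71.92')
    line = _parse_georss_line('45.256 -110.45 46.46 -109.48')
    box = _parse_georss_box('42.943 -71.032 43.039 -69.856')
    return point, line, box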
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
'''Parse a feed from a URL, file, stream, or string.
request_headers, if given, is a dict from http header name to value to add
to the request; this overrides internally generated values.
:return: A :class:`FeedParserDict`.
'''
if handlers is None:
handlers = []
if request_headers is None:
request_headers = {}
if response_headers is None:
response_headers = {}
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
result['bozo'] = 0
if not isinstance(handlers, list):
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
f = None
if hasattr(f, 'headers'):
result['headers'] = dict(f.headers)
# overwrite existing headers using response_headers
if 'headers' in result:
result['headers'].update(response_headers)
elif response_headers:
result['headers'] = copy.deepcopy(response_headers)
# lowercase all of the HTTP headers for comparisons per RFC 2616
if 'headers' in result:
http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
else:
http_headers = {}
# if feed is gzip-compressed, decompress it
if f and data and http_headers:
if gzip and 'gzip' in http_headers.get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except (IOError, struct.error), e:
# IOError can occur if the gzip header is bad.
# struct.error can occur if the data is damaged.
result['bozo'] = 1
result['bozo_exception'] = e
if isinstance(e, struct.error):
# A gzip header was found but the data is corrupt.
# Ideally, we should re-request the feed without the
# 'Accept-encoding: gzip' header, but we don't.
data = None
elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
try:
data = zlib.decompress(data)
except zlib.error, e:
try:
# The data may have no headers and no checksum.
data = zlib.decompress(data, -15)
except zlib.error, e:
result['bozo'] = 1
result['bozo_exception'] = e
# save HTTP headers
if http_headers:
if 'etag' in http_headers:
etag = http_headers.get('etag', u'')
if not isinstance(etag, unicode):
etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
if 'last-modified' in http_headers:
modified = http_headers.get('last-modified', u'')
if modified:
result['modified'] = modified
result['modified_parsed'] = _parse_date(modified)
if hasattr(f, 'url'):
if not isinstance(f.url, unicode):
result['href'] = f.url.decode('utf-8', 'ignore')
else:
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'close'):
f.close()
if data is None:
return result
# Stop processing if the server sent HTTP 304 Not Modified.
if getattr(f, 'code', 0) == 304:
result['version'] = u''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
data, result['encoding'], error = convert_to_utf8(http_headers, data)
use_strict_parser = result['encoding'] and True or False
if error is not None:
result['bozo'] = 1
result['bozo_exception'] = error
result['version'], data, entities = replace_doctype(data)
# Ensure that baseuri is an absolute URI using an acceptable URI scheme.
contentloc = http_headers.get('content-location', u'')
href = result.get('href', u'')
baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
baselang = http_headers.get('content-language', None)
if not isinstance(baselang, unicode) and baselang is not None:
baselang = baselang.decode('utf-8', 'ignore')
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# disable downloading external doctype references, if possible
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXException, e:
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser and _SGML_AVAILABLE:
feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feedparser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
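# Illustrative sketch, not part of the original feedparser source: typical ways to call
# parse(), using an inline XML string and a made-up URL. A second request can reuse the
# etag and modified values from the first result so the server may answer 304 Not
# Modified when nothing changed. The function is defined but never called here.
def _example_parse_usage():
    from_string = parse('<rss version="2.0"><channel><title>demo</title></channel></rss>')
    first = parse('http://example.com/feed.xml')
    second = parse('http://example.com/feed.xml',
                   etag=first.get('etag'), modified=first.get('modified'))
    return from_string, first, second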
# The list of EPSG codes for geographic (latitude/longitude) coordinate
# systems to support decoding of GeoRSS GML profiles.
_geogCS = [
3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008,
4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020, 4021, 4022,
4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036,
4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053, 4054, 4055, 4075, 4081,
4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132,
4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145,
4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158,
4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171,
4172, 4173, 4174, 4175, 4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185,
4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200,
4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213,
4214, 4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227,
4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253,
4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266,
4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279,
4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4291, 4292, 4293,
4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4306, 4307,
4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4322,
4324, 4326, 4463, 4470, 4475, 4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603,
4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616,
4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629,
4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642,
4643, 4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665,
4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678,
4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691,
4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704,
4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717,
4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730,
4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743,
4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756,
4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804,
4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818,
4819, 4820, 4821, 4823, 4824, 4901, 4902, 4903, 4904, 4979 ]
| [
"[email protected]"
]
| |
227c8e7d7c7faf708582ddde5050af8f34a85ecd | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_4/models/smtp_server_get_response.py | b9d7c1f33db6e8adf4bc96d6abeba9d4958fb2b7 | [
"BSD-2-Clause"
]
| permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,221 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.4, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_4 import models
class SmtpServerGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[SmtpServer]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.SmtpServer]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[SmtpServer])
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SmtpServerGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SmtpServerGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SmtpServerGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
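# Illustrative sketch, not part of the generated client code: constructing the response
# model directly and serializing it with to_dict(). The values are made up, and the items
# list is left empty because building SmtpServer objects is outside this module.
def _example_smtp_server_get_response():
    resp = SmtpServerGetResponse(continuation_token='token-123', total_item_count=0, items=[])
    return resp.to_dict()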
| [
"[email protected]"
]
| |
dbe872211a4755d36304647f012be4e14b572c81 | 8da76aabcf9cfea3478f56037edbb5fa1513140b | /maisemapaikka/dev_jt_01/maisemapaikka/apps/geomaps/widgets.py | c1ab884dc936bd34d6cf3c3c038d530f9564201b | []
| no_license | mikanyman/.virtualenvs-legacy | 039479f31f2ca9f9a3d3544d8837429ddd0a7492 | 5486128b5b3b7ddb9ec81d43e3bb601a23b4025a | refs/heads/master | 2020-12-31T07:10:07.018881 | 2017-02-01T02:16:55 | 2017-02-01T02:16:55 | 80,566,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | from django import forms
from django.db import models
from django.conf import settings
class LocationPickerWidget(forms.TextInput):
class Media:
css = {
'all': (
settings.ADMIN_MEDIA_PREFIX + 'css/location_picker.css',
)
}
js = (
'http://ajax.googleapis.com/ajax/libs/jquery/1.3.2/jquery.min.js',
'http://www.google.com/jsapi?key=ABQIAAAA4NIx2jg3c_um-4n9lSUsUBQpzvvHaH8wLU269kY3vQUW6nVQBRTnCoPQWn83MqmlDy6i0XFj9TqLxw',
settings.ADMIN_MEDIA_PREFIX + 'js/jquery.location_picker.js',
)
def __init__(self, attrs=None):
super(LocationPickerWidget, self).__init__(attrs=attrs)
def render(self, name, value, attrs=None):
if attrs == None:
attrs = {}
attrs['class'] = 'location_picker'
return super(LocationPickerWidget, self).render(name, value, attrs)
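# Illustrative sketch, not part of the original app: one plausible way to attach the
# widget to a plain Django form field. The form and field names are made up.
def _example_location_picker_form():
    class PlaceForm(forms.Form):
        location = forms.CharField(widget=LocationPickerWidget)
    return PlaceForm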
class LocationField(models.CharField):
def formfield(self, **kwargs):
kwargs['widget'] = LocationPickerWidget
return super(LocationField, self).formfield(**kwargs) | [
"[email protected]"
]
| |
46b43aae14e1edbb429984d0ea5f710a308d5c7d | 7433bb9a3e3a1ea89314c05a0ca22f52323eb33a | /task_LSTM_inbuild/step3_evaluate_line.py | a40d65169d18b78db2ed6db0a1619992e589b2ea | [
"Apache-2.0"
]
| permissive | tianyunzqs/text_classifier_tasks | 685a70c70216865c28204d48bdbf9a1239edea86 | 444ac6676547f4e3ee0ccd5fb36439e8e02f56a9 | refs/heads/master | 2022-11-05T00:31:09.070088 | 2022-10-17T02:46:38 | 2022-10-17T02:46:38 | 175,388,072 | 10 | 6 | null | null | null | null | UTF-8 | Python | false | false | 3,879 | py | # -*- coding: utf-8 -*-
# @Time : 2019/7/26 10:18
# @Author : tianyunzqs
# @Description :
import os
import sys
import numpy as np
import tensorflow as tf
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from task_LSTM_inbuild.data_helper import Vocab, CategoryDict
from task_LSTM_inbuild.text_lstm import LSTM_Model
# hyperparameters required by the LSTM model
def get_default_params():
    return tf.contrib.training.HParams(
        num_embedding_size = 16, # length of the embedding vector for each word
        # num_timesteps is the LSTM sequence length, i.e. how many words one sentence keeps.
        # Training uses minibatches, so all samples within a batch must be aligned to the
        # same length; at test time a variable length could be used instead.
        num_timesteps = 50, # a sentence is truncated/padded to 50 words
        num_lstm_nodes = [32, 32], # number of units in each LSTM layer
        num_lstm_layers = 2, # must equal the length of the list above
        # i.e. two LSTM layers with 32 units each
        num_fc_nodes = 32, # number of nodes in the fully connected layer
        batch_size = 100,
        clip_lstm_grads = 1.0,
        # gradient clipping for the LSTM: LSTMs easily suffer from exploding or vanishing
        # gradients, so any gradient above this cap is clipped down to this value
        learning_rate = 0.001,
        num_word_threshold = 10, # very low-frequency words do not help training, so apply a frequency cutoff
    )
hps = get_default_params() # build the hyperparameter object
def load_model():
vocab_file = 'D:/alg_file/data/cnews/cnews.vocab.txt'
category_file = 'D:/alg_file/data/cnews/cnews.category.txt'
vocab = Vocab(vocab_file, hps.num_word_threshold)
category = CategoryDict(category_file)
    graph = tf.Graph() # create a separate graph for each model instance
    sess = tf.Session(graph=graph) # create a new session bound to that graph
with sess.as_default():
with graph.as_default():
lstm = LSTM_Model(hps, vocab.size(), category.size())
saver = tf.train.Saver() # defaults to saving all variables - in this case w and b
# Initialize all variables
sess.run(tf.global_variables_initializer())
checkpoint_dir = os.path.abspath(os.path.join(os.path.curdir, "checkpoints"))
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
saver.restore(sess, ckpt.model_checkpoint_path)
return vocab, category, lstm, sess
vocab, category, lstm, sess = load_model()
def evaluate_line(text):
id_words = vocab.sentence_to_id(text)
id_words = id_words[0: hps.num_timesteps]
padding_num = hps.num_timesteps - len(id_words)
id_words = id_words + [vocab.unk for _ in range(padding_num)]
batch_x = [id_words] * hps.batch_size
_, predict_label = sess.run(
[lstm.train_op, lstm.y_pred],
feed_dict={
lstm.inputs: np.array(batch_x),
lstm.outputs: np.array([0] * hps.batch_size),
lstm.keep_prob: 1.0
}
)
return category.id_to_category.get(predict_label[0])
if __name__ == '__main__':
import time
    # Finance (the category of the sample text below)
text = """交银货币清明假期前两日暂停申购和转换入全景网3月30日讯 交银施罗德基金周一公告称,公司旗下的交银施罗德货币市场证券投资基金将于2009年"清明"假期前两日暂停申购和转换入业务。公告表示,交银施罗德货币将于2009年4月2日、3日两天暂停办理基金的申购和转换入业务。转换出、赎回等其他业务以及公司管理的其他开放式基金的各项交易业务仍照常办理。自2009年4月7日起,所有销售网点恢复办理基金的正常申购和转换入业务。(全景网/雷鸣)"""
t1 = time.time()
label = evaluate_line(text=text)
t2 = time.time()
print(label)
print('cost time: {0}ms'.format(t2 - t1))
| [
"[email protected]"
]
| |
f9c790eb2cc47ba5039ad06c28c4aece60bbd206 | 8e0149f00f82f57216584b53180ec02870dee7e8 | /python/linked_list/heap/lc23.py | 80140fd3c24babfcf1832732796fa94aec1ba01e | []
| no_license | chao-ji/LeetCode | 5880e0fa91d84ad70b5abd47e24ac75646fdcdf9 | 69a960dd8f39e9c8435a3678852071e1085fcb72 | refs/heads/master | 2020-12-13T03:35:42.009204 | 2019-06-15T04:45:38 | 2019-06-15T04:45:38 | 51,043,575 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,748 | py | """23. Merge k Sorted Lists
Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
Example:
Input:
[
1->4->5,
1->3->4,
2->6
]
Output: 1->1->2->3->4->4->5->6
"""
import heapq
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# Solution1, use min-heap
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
if not lists:
return None
# We have `k` linked lists
# a11 < a12 < a13 < ... a1n1
        # a21 < a22 < a23 < ... a2n2
# ...
# ak1 < ak2 < ak3 < ... aknk
# The smallest number must be the minimum of a11, a21, ..., ak1
# Say it's a11.
# What will be the next smallest number?
# It can't be `a13`, ..., `a1n1`, because we have `a12` smaller than them.
        # It can't be `a22`, ..., `a2n2`, because we have `a21` smaller than them.
# ...
# It can't be `ak2`, ..., `aknk`, because we have `ak1` smaller than them.
# So again, the next smallest number must be the minimum of a12, a21, ..., ak1
# We know how to merge two sorted lists: LC 21 Merge Two Sorted Lists
# We can use the same approach:
# 1. scan the first node of all remainling non-empty linked lists,
# and find which one is the smallest
# 2. remove that from the original linked list and add to the new,
# growing linked list
# 3. repeat step 1 and 2
# However, this would require us to scan all linked list over and over
# again.
# KEY Idea:
# When we scan the first node of all linked lists, if we put them in
# min-heap (keyed on the node's value), we can easily extract the node
# with minimum value in time O(log k), and insert its successor in the original
# linked list that it came from
# Initialize the growing linked list
# `dummy.next` always points to the start of the growing list, initially empty
# `curr` always points to the last node of the growling list, initially empty
curr = dummy = ListNode(0)
# initialize `heap` with 2-tuples (node's key, node) using the first
# node (i.e. curr) of all linked lists
heap = [(node.val, node) for node in lists if node]
heapq.heapify(heap)
while heap:
# Extract the node with minimum value from heap
_, node = heapq.heappop(heap)
# take note of the successor of `node`
next = node.next
# disconnect it from the rest of the linked list it's from
node.next = None
# add to the growing linked list
curr.next = node
curr = curr.next
# insert the successor of the popped node, if it's non-empty
if next:
heapq.heappush(heap, (next.val, next))
return dummy.next
# Solution2, divide & conquer
# time: O(N*logk), N = total number of nodes, k = number of lists
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
# KEY Insight: divide and conquer
# If the number of lists is 2, we already know how to merge two
# sorted lists. See LC 21 Merge Two Sorted Lists.
# We can split the `lists in two halves:
# list_1, list_2, ... list_m
#
# list_m+1, list_m+2, ..., list_k
# And recursively merge the lists in the first half and second half.
#
# Let `left` and `right` be the outcomes of the two recursion.
#
# Since they are already sorted, we can simply merge them into a single
# sorted list.
# Time complexity:
#
# The height of the recursion is O(log k), and in each level
# of recursion, the number of nodes to be visited is at most O(N) over
# all merges, where `N` is the total number of nodes
# In total: we have O(N*logk)
# Base case:
# when the number of lists <= 1
if len(lists) == 0:
return None
elif len(lists) == 1:
return lists[0]
size = len(lists)
left = self.mergeKLists(lists[:size//2])
right = self.mergeKLists(lists[size//2:])
merged = node = ListNode(0)
l1 = left
l2 = right
while l1 and l2:
if l1.val < l2.val:
node.next = l1
l1 = l1.next
else:
node.next = l2
l2 = l2.next
node = node.next
if l1:
node.next = l1
if l2:
node.next = l2
return merged.next
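# --- Illustrative usage sketch (added; not part of the original solution) ---
# LeetCode normally supplies ListNode, so a minimal stand-in is defined here to
# make the demo self-contained; it merges the example lists from the docstring.
# Note that the second (divide & conquer) Solution definition is the one in effect.
if __name__ == "__main__":
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None
    def build(values):
        # build a singly linked list from a Python list and return its head
        dummy = tail = ListNode(0)
        for v in values:
            tail.next = ListNode(v)
            tail = tail.next
        return dummy.next
    lists = [build([1, 4, 5]), build([1, 3, 4]), build([2, 6])]
    node = Solution().mergeKLists(lists)
    merged = []
    while node:
        merged.append(node.val)
        node = node.next
    print(merged)  # expected: [1, 1, 2, 3, 4, 4, 5, 6]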
| [
"[email protected]"
]
| |
e6cdba46aeece3e020f759b8414108b144310136 | 255dc7ff8fb676027021a674bd624fb6587fa2f7 | /compiler/tests/22_sram_func_test.py | 3a7ff5a3e9521545a1be95c25171bfa119148a16 | [
"BSD-3-Clause"
]
| permissive | orbe7947/OpenRAM | 80b40462fb7c1044fdacf34908432820b71f6092 | 29c5ab48f0a82972337f4b17ee90695ff1f8f825 | refs/heads/master | 2021-08-16T11:35:10.528368 | 2017-11-14T21:24:14 | 2017-11-14T21:24:14 | 110,760,794 | 0 | 0 | null | 2017-11-15T00:14:12 | 2017-11-15T00:14:12 | null | UTF-8 | Python | false | false | 1,788 | py | #!/usr/bin/env python2.7
"""
Run a regresion test on various srams
"""
import unittest
from testutils import header
import sys,os
sys.path.append(os.path.join(sys.path[0],".."))
import globals
import debug
import calibre
OPTS = globals.get_opts()
#@unittest.skip("SKIPPING 21_timing_sram_test")
class sram_func_test(unittest.TestCase):
def runTest(self):
OPTS.analytical_delay = False
globals.init_openram("config_20_{0}".format(OPTS.tech_name))
# we will manually run lvs/drc
OPTS.check_lvsdrc = False
import sram
debug.info(1, "Testing timing for sample 1bit, 16words SRAM with 1 bank")
s = sram.sram(word_size=OPTS.config.word_size,
num_words=OPTS.config.num_words,
num_banks=OPTS.config.num_banks,
name="sram_func_test")
OPTS.check_lvsdrc = True
import delay
tempspice = OPTS.openram_temp + "temp.sp"
s.sp_write(tempspice)
probe_address = "1" * s.addr_size
probe_data = s.word_size - 1
debug.info(1, "Probe address {0} probe data {1}".format(probe_address, probe_data))
d = delay.delay(s,tempspice)
d.set_probe(probe_address,probe_data)
# This will exit if it doesn't find a feasible period
import tech
load = tech.spice["FF_in_cap"]*4
slew = tech.spice["rise_time"]*2
feasible_period = d.find_feasible_period(load,slew)
os.remove(tempspice)
OPTS.analytical_delay = True
globals.end_openram()
# instantiate a copy of the class to actually run the test
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main()
| [
"[email protected]"
]
| |
1da2e624120594b03a58fd0253262dd9d3ce45bb | cb836bde47c790c7ad990d44d86d60c13f43a2a0 | /markdown_it/token.py | 0c03ac1fc8f64c41339533f0ce672a0994fe315f | [
"MIT"
]
| permissive | iooxa/markdown-it-py | 133028a981af715ce244554e26b92b16fc4443ac | 21837dfa0ce9be249de372bb10733a534f8e0a50 | refs/heads/master | 2022-11-19T14:23:23.618106 | 2020-07-20T15:57:16 | 2020-07-20T15:57:16 | 281,160,226 | 0 | 0 | MIT | 2020-07-20T15:57:17 | 2020-07-20T15:50:02 | null | UTF-8 | Python | false | false | 5,690 | py | from typing import List, Optional, Tuple, Union
import attr
@attr.s(slots=True)
class Token:
# Type of the token (string, e.g. "paragraph_open")
type: str = attr.ib()
# html tag name, e.g. "p"
tag: str = attr.ib()
# Level change (number in {-1, 0, 1} set), where:
# - `1` means the tag is opening
# - `0` means the tag is self-closing
# - `-1` means the tag is closing
nesting: int = attr.ib()
# Html attributes. Format: `[ [ name1, value1 ], [ name2, value2 ] ]`
attrs: Optional[list] = attr.ib(default=None)
# Source map info. Format: `[ line_begin, line_end ]`
map: Optional[Tuple[int, int]] = attr.ib(default=None)
# nesting level, the same as `state.level`
level: int = attr.ib(default=0)
# An array of child nodes (inline and img tokens)
children: Optional[List["Token"]] = attr.ib(default=None)
# In a case of self-closing tag (code, html, fence, etc.),
# it has contents of this tag.
content: str = attr.ib(default="")
# '*' or '_' for emphasis, fence string for fence, etc.
markup: str = attr.ib(default="")
# fence infostring
info: str = attr.ib(default="")
# A place for plugins to store an arbitrary data
meta: dict = attr.ib(factory=dict)
# True for block-level tokens, false for inline tokens.
# Used in renderer to calculate line breaks
block: bool = attr.ib(default=False)
# If it's true, ignore this element when rendering.
# Used for tight lists to hide paragraphs.
hidden: bool = attr.ib(default=False)
def attrIndex(self, name: str) -> int:
if not self.attrs:
return -1
for i, at in enumerate(self.attrs):
if at[0] == name:
return i
return -1
def attrPush(self, attrData: Tuple[str, str]):
"""Add `[ name, value ]` attribute to list. Init attrs if necessary."""
if self.attrs:
self.attrs.append(attrData)
else:
self.attrs = [attrData]
def attrSet(self, name: str, value: str):
"""Set `name` attribute to `value`. Override old value if exists."""
idx = self.attrIndex(name)
if idx < 0:
self.attrPush([name, value])
else:
self.attrs[idx] = [name, value]
def attrGet(self, name: str) -> str:
""" Get the value of attribute `name`, or null if it does not exist."""
idx = self.attrIndex(name)
if idx >= 0:
return self.attrs[idx][1]
return None
def attrJoin(self, name, value):
"""Join value to existing attribute via space.
Or create new attribute if not exists.
Useful to operate with token classes.
"""
idx = self.attrIndex(name)
if idx < 0:
self.attrPush([name, value])
else:
self.attrs[idx][1] = self.attrs[idx][1] + " " + value
def copy(self):
"""Return a shallow copy of the instance."""
return attr.evolve(self)
def as_dict(self, children=True, filter=None, dict_factory=dict):
"""Return the token as a dict.
:param bool children: Also convert children to dicts
:param callable filter: A callable whose return code determines whether an
attribute or element is included (``True``) or dropped (``False``). Is
called with the `attr.Attribute` as the first argument and the
value as the second argument.
:param callable dict_factory: A callable to produce dictionaries from. For
example, to produce ordered dictionaries instead of normal Python
dictionaries, pass in ``collections.OrderedDict``.
"""
return attr.asdict(
self, recurse=children, filter=filter, dict_factory=dict_factory
)
@classmethod
def from_dict(cls, dct):
token = cls(**dct)
if token.children:
token.children = [cls.from_dict(c) for c in token.children]
return token
@attr.s(slots=True)
class NestedTokens:
"""A class that closely resembles a Token,
but for a an opening/closing Token pair, and their containing children.
"""
opening: Token = attr.ib()
closing: Optional[Token] = attr.ib()
children: List[Union[Token, "NestedTokens"]] = attr.ib(factory=list)
def __getattr__(self, name):
return getattr(self.opening, name)
def attrGet(self, name: str) -> str:
""" Get the value of attribute `name`, or null if it does not exist."""
return self.opening.attrGet(name)
def nest_tokens(tokens: List[Token]) -> List[Union[Token, NestedTokens]]:
"""Convert the token stream to a list of tokens and nested tokens.
``NestedTokens`` contain the open and close tokens and a list of children
of all tokens in between (recursively nested)
"""
output = []
tokens = list(reversed(tokens))
while tokens:
token = tokens.pop()
if token.nesting == 0:
token = token.copy()
output.append(token)
if token.children:
token.children = nest_tokens(token.children)
continue
assert token.nesting == 1, token.nesting
nested_tokens = [token]
nesting = 1
while tokens and nesting != 0:
token = tokens.pop()
nested_tokens.append(token)
nesting += token.nesting
if nesting != 0:
raise ValueError(f"unclosed tokens starting {nested_tokens[0]}")
child = NestedTokens(nested_tokens[0], nested_tokens[-1])
output.append(child)
child.children = nest_tokens(nested_tokens[1:-1])
return output
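# --- Illustrative usage sketch (added; not part of the original module) ---
# Shows how a flat token stream is folded into NestedTokens; the three tokens
# below are hand-built stand-ins for what MarkdownIt().parse() would emit.
if __name__ == "__main__":
    stream = [
        Token("paragraph_open", "p", 1),
        Token("inline", "", 0, content="hello"),
        Token("paragraph_close", "p", -1),
    ]
    nested = nest_tokens(stream)
    assert len(nested) == 1 and isinstance(nested[0], NestedTokens)
    assert nested[0].children[0].content == "hello"
    print(nested[0].opening.type, "->", nested[0].closing.type)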
| [
"[email protected]"
]
| |
b6723850310e650934ca18886791a71dee495084 | 2fd8f1cafdabfdf9507a1a7e232e13ac7756767f | /data/data_models.py | ca393ea6ffc156bcc2ffa70f40a7fab6b96bb7a7 | []
| no_license | dewmal/fx_agent_sma | 34f3571fe37bfc18c72b8f9ec101dbbe5610a0bb | 7ecec6ab432d8d43daa6d9cb4a838b1ade1e0c13 | refs/heads/master | 2020-06-16T10:16:20.359791 | 2019-07-17T06:09:52 | 2019-07-17T06:09:52 | 195,536,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,508 | py | import datetime
from utils import round_time
class TickStream:
__type__ = "tick_stream"
symbol: str
ask: float
bid: float
quote: float
epoch: int
value: float
def __init__(self, tickId, symbol, ask, bid, quote, epoch, version="1.0") -> None:
super().__init__()
self.tickId = tickId
self.bid = bid
self.ask = ask
self.symbol = symbol
self.quote = quote
self.epoch = epoch
self.version = version
self.value = (self.ask + self.bid) / 2
def as_dict(self):
return {
"tickId": self.tickId,
"symbol": self.symbol,
"ask": self.ask,
"bid": self.bid,
"quote": self.quote,
"epoch": self.epoch,
}
@classmethod
def from_dict(cls, tick_data):
# print(tick_data['pair'])
return TickStream(
tick_data['tickId'],
tick_data['symbol'],
tick_data['ask'],
tick_data['bid'],
tick_data['quote'],
tick_data['epoch']
)
def __str__(self) -> str:
return f"{self.as_dict()}"
class TickWindow:
__type__ = "window_stream"
open = 0
high = 0
low = 0
close = 0
epoch = 0
symbol: str
last_epoch_time = 0
def __init__(self, open, high, low, close, epoch, symbol, last_epoch_time=0, tick_list=[], id=None) -> None:
self.id = id
self.last_epoch_time = last_epoch_time
self.open = open
self.high = high
self.low = low
self.close = close
self.epoch = epoch
self.symbol = symbol
self.tick_list = tick_list
def as_dict(self):
return {
"open": self.open,
"high": self.high,
"low": self.low,
"close": self.close,
"epoch": self.epoch,
"symbol": self.symbol,
"last_epoch_time": self.last_epoch_time,
}
@classmethod
def from_dict(cls, _data):
return TickWindow(
_data['open'],
_data['high'],
_data['low'],
_data['close'],
_data['epoch'],
_data['symbol'],
_data['last_epoch_time'],
[],
_data['_id'],
)
@staticmethod
def from_tick_list(tick_list: [TickStream]):
if len(tick_list) > 0:
open_tick = tick_list[0]
high_tick = max(tick_list, key=lambda tick: tick.value)
low_tick = min(tick_list, key=lambda tick: tick.value)
close_tick = tick_list[-1]
return TickWindow(open_tick.value, high_tick.value, low_tick.value, close_tick.value,
round_time(datetime.datetime.fromtimestamp(open_tick.epoch)).timestamp(),
open_tick.symbol,
tick_list)
else:
return None
def __str__(self) -> str:
return f"{self.symbol} OLHC - {self.open},{self.high},{self.low},{self.close},{self.epoch},{self.last_epoch_time},{self.id}"
class TIData:
def __init__(self, name, time_interval, epoch, data, symbol) -> None:
super().__init__()
self.time_interval = time_interval
self.symbol = symbol
self.data = data
self.epoch = epoch
self.name = name
def __str__(self):
return f"{self.name}-{self.time_interval},{self.data},{self.epoch},{self.symbol}"
| [
"[email protected]"
]
| |
3d817787469b94efb5701656e528c260991baace | d5751e2f2b2128079d3473cf14b02c67515dba72 | /flask_fundamentals/2.form_test/server.py | e5183bece53a77565141c9f57f8e5f49966d5057 | []
| no_license | seymakara/dojo_python | 814ed49b561703e3a993a1ade0f084c234e82b13 | ff8a56020d9ab337d930ec4ce4039f0bca2cfead | refs/heads/master | 2021-05-13T20:44:27.327035 | 2018-01-10T06:32:27 | 2018-01-10T06:32:27 | 116,917,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | from flask import Flask, render_template, request, redirect
app = Flask(__name__)
# our index route will handle rendering our form
@app.route('/')
def index():
return render_template("index.html")
# this route will handle our form submission
# notice how we defined which HTTP methods are allowed by this route
@app.route('/users', methods=['POST'])
def create_user():
print "Got Post Info"
# we'll talk about the following two lines after we learn a little more
# about forms
name = request.form['name']
email = request.form['email']
# redirects back to the '/' route
return redirect('/')
app.run(debug=True) # run our server
| [
"[email protected]"
]
| |
6aaab26b75b53923e8a74876b16f34d30fbe0c44 | 3c5657492c401994eaaebcf16c2b13a5ebc0efd8 | /cresthh/tools/reduceSWW.py | 768a6bf1a89c6bb4c6c7d17e1ebe895143fd9d7a | []
| no_license | peggypan0411/CREST-iMAP | 0b01d1123f7be7806971ead4835ea2e7f61f81a9 | b2d80e2c3eb3fb575c678915fd89a96bdb30dbde | refs/heads/master | 2023-07-16T08:11:26.736833 | 2021-04-14T21:44:02 | 2021-04-14T21:44:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,949 | py | #!/home/ZhiLi/CRESTHH/python2/bin/python2
'''
A command line tool to get reduced geotiffs quickly
__author__: Zhi Li
__Date__: 2021/02/07
'''
import argparse
import numpy as np
import sys
sys.path.append('/home/ZhiLi/CRESTHH')
from cresthh.anuga.file_conversion.sww2dem import sww2dem
from cresthh.anuga import SWW_plotter
from netCDF4 import Dataset
from osgeo import gdal
import os
import numpy as np
import matplotlib.tri as mtri
from pyproj import CRS, transform
def export_tif(dst, lons, lats, arr, sample):
# print arr.shape, lons.shape, lats.shape
rows, cols= arr.shape
driver = gdal.GetDriverByName("GTiff")
outdata = driver.Create(dst, cols, rows, 1, gdal.GDT_Float32)
outdata.SetGeoTransform([lons[0], np.diff(lons)[0],0,
lats[0], 0, np.diff(lats)[0]])##sets same geotransform as input
outdata.SetProjection(sample.GetProjection())##sets same projection as input
outdata.GetRasterBand(1).WriteArray(arr)
outdata.GetRasterBand(1).SetNoDataValue(-9999)##if you want these values transparent
outdata.FlushCache() ##saves to disk!!
outdata = None
band=None
ds=None
parser= argparse.ArgumentParser(description='Quick retrieval of flood depth\nAuthor: Allen Zhi Li\nDate: 2021/02/07')
parser.add_argument('--sww', type=str, metavar='sww file', required=True,
help='SWW file to be retrieved from')
parser.add_argument('--dst', type=str, metavar='destination', required=True,
help='File path to store transformed file')
parser.add_argument('--tif', type=bool, metavar='output GeoTiff', required=False,
default=True, help='Whether output tif format, default True')
parser.add_argument('--quantity', type=str, metavar='output quantity', required=False,
default='depth', help= 'which quantity to output, default depth')
parser.add_argument('--reduce', type=str, metavar='reduction', required=False,
default='max', help= 'choose a method to reduce time dimension, default max.')
parser.add_argument('--tr', type=float, metavar='resolution', required=False,
default=None, help= 'choose whether to rescale image, default 10m; method: bilinear interpolation')
parser.add_argument('--s_srs', type=str, required=False, default="EPSG:32215", help= 'source projection system')
parser.add_argument('--t_srs', type=str, required=False, default="EPSG:4326", help= 'target projection system')
parser.add_argument('--interp', type=str, required=False, default="square", help= 'interpolation method')
parser.add_argument('--DSM', type=str, required=False, default=None, help="surface elevation model to use")
parser.add_argument('--flood_fill', type=bool, required=False, default=False, help="whether to use flood fill")
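# Example invocation (file names and EPSG codes below are placeholders, not taken from the original docs):
#   python reduceSWW.py --sww run.sww --dst depth_max.tif --quantity depth --reduce max --tr 10 \
#       --s_srs EPSG:32215 --t_srs EPSG:4326 --interp square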
if __name__=='__main__':
args= parser.parse_args()
sww_file= args.sww
dst= args.dst
isTiff= args.tif
toReduce= args.reduce
res= args.tr
quantity= args.quantity
s_srs= args.s_srs
t_srs= args.t_srs
interp= args.interp
dsm= args.DSM
ifFloodFill= args.flood_fill
    base_name = os.path.splitext(dst)[0]  # strip the extension; the original "dst.split('.')[:-1]" returned a list, which breaks the later string concatenations
if quantity not in ['depth', 'xmomentum', 'elevation', 'ymomentum', 'excRain']:
raise ValueError('expected quantity in ["depth", "xmomentum", "elevation", "ymomentum", "excRain"]')
if toReduce=='max':
reduce=max
elif toReduce=='mean':
        reduce=np.mean # bare "mean" was undefined here; numpy's mean is the intended time reduction
else:
reduce= int(toReduce) #choose time series
if res is None:
res=10
if interp=='square':
#use inherent 2nd order extrapolation
sww2dem(sww_file, base_name+'.asc', quantity=quantity, verbose=True, reduction=reduce, cellsize=res)
if isTiff:
os.system('gdalwarp -co COMPRESS=LZW -ot Float32 -s_srs %s -t_srs %s %s %s'%(s_srs, t_srs, base_name+'.asc', base_name+'.tif'))
os.system('rm %s'%(base_name+'.asc'))
os.system('rm %s'%(base_name+'.prj'))
elif interp in ['linear', 'cubic']:
# use Triangulation interpolation and refined with digital surface model
if dsm is None:
msg= "you have to provide a surface elevation model"
raise ValueError(msg)
dsm= gdal.Open(dsm)
dsm_arr= dsm.ReadAsArray()
geo= dsm.GetGeoTransform()
lons= np.linspace(geo[0], geo[1]*(dsm.RasterXSize)+geo[0], dsm.RasterXSize)
lats= np.linspace(geo[3], geo[-1]*dsm.RasterYSize+geo[3], dsm.RasterYSize)
lons2d, lats2d= np.meshgrid(lons, lats)
from cresthh.anuga.file.netcdf import NetCDFFile
p = NetCDFFile(sww_file)
z= np.array(p.variables['stage'])
x = np.array(p.variables['x']) + p.xllcorner
y = np.array(p.variables['y']) + p.yllcorner
_y, _x= transform(s_srs, t_srs, x, y)
triangles = np.array(p.variables['volumes'])
triang = mtri.Triangulation(_x, _y, triangles)
        if isinstance(reduce, int): # "reduce" holds the parsed time-step index; "toReduce" is always a string from argparse
            _z= z[reduce]
else:
_z= z.max(axis=0)
if interp=='linear':
interpolator= mtri.LinearTriInterpolator(triang, _z)
elif interp=='cubic':
interpolator= mtri.CubicTriInterpolator(triang, _z, kind='geom')
zi_interp= interpolator(lons2d,lats2d)
if ifFloodFill:
from skimage.morphology import reconstruction
zi_interp[zi_interp<dsm_arr]= dsm_arr[zi_interp<dsm_arr]
filled = reconstruction(zi_interp, dsm_arr, method='erosion')
export_tif(base_name+'.tif', lons, lats, filled-dsm_arr, dsm)
else:
zi_interp[zi_interp<dsm_arr]= dsm_arr[zi_interp<dsm_arr]
export_tif(base_name+'.tif', lons, lats, zi_interp-dsm_arr, dsm)
else:
raise ValueError('invalid argument, only supports LSI and cubic')
# os.system('rm %s && mv %s %s'%(dst, dst+'.temp',dst))
print('Completed! output file name: %s'%dst)
| [
"[email protected]"
]
| |
3622942b7c93de7b6819004d190c5034570c3137 | eb33957e7b140c762fb77e5c83e5bba14aaeb8d3 | /jam/server/api/v1/namespace.py | 67e81b1ea025c04385aaeeced47b442bdb4acd19 | []
| no_license | AndrewSallans/jamdb | 8a4a9d5ec03ca77bd0ad45404f8031b558898270 | 6eb4c0b465034e7ef5a648873be2353c4093c863 | refs/heads/develop | 2021-01-15T11:19:59.679368 | 2016-03-04T23:37:02 | 2016-03-04T23:37:02 | 53,345,931 | 0 | 0 | null | 2016-03-07T17:46:52 | 2016-03-07T17:46:49 | HTML | UTF-8 | Python | false | false | 2,996 | py | import operator
import functools
from jam import Q
from jam import NamespaceManager
from jam.auth import Permissions
from jam.server.api.v1.base import View
from jam.server.api.v1.base import Serializer
from jam.server.api.v1.base import Relationship
class NamespaceView(View):
name = 'namespace'
plural = 'namespaces'
MANAGER = NamespaceManager()
@classmethod
def load(self, id):
return self.MANAGER.get_namespace(id)
def __init__(self, resource=None):
super().__init__(resource=resource)
self._namespace = resource
def get_permissions(self, request):
if request.method == 'GET' and self.resource is None:
return Permissions.NONE
return super().get_permissions(request)
def read(self, user):
return self.MANAGER.read(self.resource.name)
def update(self, patch, user):
return self.MANAGER.update(self._namespace.name, patch, user.uid)
def list(self, filter, sort, page, page_size, user):
query = functools.reduce(operator.or_, [
Q('data.permissions.*', 'and', Permissions.READ),
Q('data.permissions.{0.type}-*'.format(user), 'and', Permissions.READ),
Q('data.permissions.{0.type}-{0.provider}-*'.format(user), 'and', Permissions.READ),
Q('data.permissions.{0.type}-{0.provider}-{0.id}'.format(user), 'and', Permissions.READ),
])
if filter:
filter &= query
else:
filter = query
return self.MANAGER.select().where(filter).page(page, page_size).order_by(sort)
class CollectionRelationship(Relationship):
@classmethod
def view(cls, namespace):
from jam.server.api.v1.collection import CollectionView
return CollectionView(namespace)
@classmethod
def serializer(cls):
from jam.server.api.v1.collection import CollectionSerializer
return CollectionSerializer
@classmethod
def self_link(cls, request, namespace):
if request.path.startswith('/v1/id'):
return '{}://{}/v1/id/namespaces/{}/collections'.format(request.protocol, request.host, namespace.ref)
return '{}://{}/v1/namespaces/{}/collections'.format(request.protocol, request.host, namespace.ref)
@classmethod
def related_link(cls, request, namespace):
if request.path.startswith('/v1/id'):
return '{}://{}/v1/id/namespaces/{}/collections'.format(request.protocol, request.host, namespace.ref)
return '{}://{}/v1/namespaces/{}/collections'.format(request.protocol, request.host, namespace.ref)
class NamespaceSerializer(Serializer):
type = 'namespaces'
relations = {
'collections': CollectionRelationship
}
@classmethod
def attributes(cls, inst):
return {
'name': inst.ref,
'permissions': {
sel: Permissions(perm).name
for sel, perm in inst.data['permissions'].items()
}
}
| [
"[email protected]"
]
| |
cd94742c9c7694054d5b7b202660c0becf1c5052 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/1700.py | 4c068771cbd023f47f2d94b37f052921066dddfa | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | def data(filename):
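# (Comments added for readability; the original solution had none.)
# This appears to be the "Cookie Clicker Alpha"-style problem: each farm costs c,
# adds f to the production rate (which starts at 2), and the goal is x cookies.
# t1/t2 hold the total time needed when using nf-1 and nf farms; keep buying
# farms while one more farm still lowers the total time, then output the optimum.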
fi = open(filename,'r')
o = open(filename+".out",'w')
tests = fi.readline().strip()
a = fi.readlines()
for i in range(0,int(tests)):
c,f,x = map(float,a[i].strip().split())
nf = 1
t1 = x/2
t2 = c/2+x/(2+nf*f)
while (t1-t2 > 10**-7):
nf += 1
t1 = t2
t2 = buy(c,f,nf) + x/(2+nf*f)
o.write("Case #" + str(i+1) + ": %.7f\n" % t1)
fi.close()
o.close()
def buy(c,f,nf):
time = 0
for i in range(0,nf):
time += c/(2+i*f)
return time | [
"[email protected]"
]
| |
d2c0be537b266e7b741920df715f2b942cf343bb | 3437f523434b86af02476fc0056030a67feaa9a5 | /examples/mpc_linear_svm/launcher.py | d88a1d33341b60a05cf1faa29f11c82ceea9e982 | [
"MIT"
]
| permissive | facebookresearch/CrypTen | 481d0bfc94582eedef8b3510d91fd6b3ce253097 | 99c3a046b705c9d69d7a10fcab59a444ffbee39a | refs/heads/main | 2023-09-04T21:10:29.331999 | 2023-08-25T22:11:00 | 2023-08-25T22:11:00 | 202,443,088 | 1,388 | 323 | MIT | 2023-09-01T16:34:22 | 2019-08-15T00:00:31 | Python | UTF-8 | Python | false | false | 2,518 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run mpc_linear_svm example in multiprocess mode:
$ python3 examples/mpc_linear_svm/launcher.py --multiprocess
To run mpc_linear_svm example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/mpc_linear_svm/mpc_linear_svm.py \
examples/mpc_linear_svm/launcher.py
"""
import argparse
import logging
import os
from examples.multiprocess_launcher import MultiProcessLauncher
parser = argparse.ArgumentParser(description="CrypTen Linear SVM Training")
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--epochs", default=50, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--examples", default=50, type=int, metavar="N", help="number of examples per epoch"
)
parser.add_argument(
"--features",
default=100,
type=int,
metavar="N",
help="number of features per example",
)
parser.add_argument(
"--lr", "--learning-rate", default=0.5, type=float, help="initial learning rate"
)
parser.add_argument(
"--skip_plaintext",
default=False,
action="store_true",
help="skip evaluation for plaintext svm",
)
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
def _run_experiment(args):
level = logging.INFO
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
logging.getLogger().setLevel(level)
logging.basicConfig(
level=level,
format="%(asctime)s - %(process)d - %(name)s - %(levelname)s - %(message)s",
)
from mpc_linear_svm import run_mpc_linear_svm
run_mpc_linear_svm(
args.epochs, args.examples, args.features, args.lr, args.skip_plaintext
)
def main(run_experiment):
args = parser.parse_args()
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
run_experiment(args)
if __name__ == "__main__":
main(_run_experiment)
| [
"[email protected]"
]
| |
e9dbfe8104201e8d10606f7234b30f1427f85c8c | c39f999cae8825afe2cdf1518d93ba31bd4c0e95 | /PYME/DSView/LUT/__init__.py | ad67a04cf4d26dc2ca675547218211dd1692f45c | []
| no_license | WilliamRo/CLipPYME | 0b69860136a9b2533f2f29fc29408d7471cb934d | 6596167034c727ad7dad0a741dd59e0e48f6852a | refs/heads/master | 2023-05-11T09:50:58.605989 | 2023-05-09T02:17:47 | 2023-05-09T02:17:47 | 60,789,741 | 3 | 1 | null | 2016-06-17T08:52:44 | 2016-06-09T16:30:14 | Python | UTF-8 | Python | false | false | 1,089 | py | #!/usr/bin/python
##################
# __init__.py
#
# Copyright David Baddeley, 2009
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
from lut import *
def applyLUT(seg, gain, offset, lut, ima):
if seg.dtype == 'uint8':
applyLUTu8(seg, gain, offset, lut, ima)
elif seg.dtype == 'uint16':
#print lut.strides
applyLUTu16(seg, gain, offset, lut, ima)
else:
applyLUTf(seg.astype('f'), gain, offset, lut, ima)
| [
"[email protected]"
]
| |
333f6a2ec3a9229e86c9318beb30008c51908041 | 591a1a5b334efc878d890c2492a2f1b6cf475b6c | /fixJobAccountant.py | 41d6c79ffadfea61bdaa333e5338c42660fc5f43 | []
| no_license | amaltaro/ProductionTools | b827fa8a80006443b00004f90658791fdea4fc26 | df85a4d4ae35e4f4c7523fcba9b22a1300329e06 | refs/heads/master | 2023-06-08T22:59:19.377928 | 2023-05-24T18:04:26 | 2023-05-24T18:04:26 | 22,791,862 | 0 | 3 | null | 2021-03-22T11:41:18 | 2014-08-09T17:55:03 | Python | UTF-8 | Python | false | false | 2,519 | py | """
__fixJobAccountant.py__
Fixes Report.pkl files when JobAccountant crashes reporting that
TaskName does not exist in the FJR.
Created on Oct 15, 2014.
@author: amaltaro
"""
import sys, os, subprocess
import threading
import logging
import time
from pprint import pprint
from optparse import OptionParser
try:
from collections import defaultdict
from WMCore.WMInit import connectToDB
from WMCore.Database.DBFormatter import DBFormatter
from WMCore.FwkJobReport.Report import Report
except ImportError:
print "You do not have a proper environment, please source the following:"
print "source /data/srv/wmagent/current/apps/wmagent/etc/profile.d/init.sh"
sys.exit(1)
getQuery = """
SELECT wj.fwjr_path, ww.task FROM wmbs_workflow ww
INNER JOIN wmbs_subscription ws ON ws.workflow = ww.id
INNER JOIN wmbs_jobgroup wjg ON wjg.subscription = ws.id
INNER JOIN wmbs_job wj ON wj.jobgroup = wjg.id
WHERE wj.id = """
def main():
"""
_main_
"""
usage = "Usage: %prog -j jobId"
parser = OptionParser(usage = usage)
parser.add_option('-j', '--jobId', help = 'Wmbs jobId reported in the component log', dest = 'jobId')
(options, args) = parser.parse_args()
if not options.jobId:
        print 'Example: python fixJobAccountant.py -j "1678 1679"'
        parser.error('You must provide at least one jobId')  # was "parse.error", a NameError; parser.error() prints the message and exits
sys.exit(1)
if 'WMAGENT_CONFIG' not in os.environ:
os.environ['WMAGENT_CONFIG'] = '/data/srv/wmagent/current/config/wmagent/config.py'
if 'manage' not in os.environ:
os.environ['manage'] = '/data/srv/wmagent/current/config/wmagent/manage'
connectToDB()
myThread = threading.currentThread()
formatter = DBFormatter(logging, myThread.dbi)
for job in options.jobId.split():
myQuery = getQuery + str(job)
output = myThread.transaction.processData(myQuery)
result = formatter.format(output)
reportPath = result[0][0]
taskName = result[0][1]
#print 'Report path: %s' % reportPath
#print 'Task name: %s' % taskName
jr = Report(reportPath)
if jr.getTaskName():
print "Job id %s already has a TaskName %s.\nSkipping .." % (job, jr.getTaskName())
continue
jr.setTaskName(taskName)
jr.save(reportPath)
print "Updated TaskName for fwjr for jobId: %s" % job
print "Done!"
return 0
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
]
| |
38059ca498d3f9141b84156a522fd5bca676ddfa | 0afc72deeb8928e6d488a1c3cb762ed0c4bd73fc | /scripts/02.transform/02.encode.py | da3e9cc96bd537a4a9ab5f8a61a53ebb8d4cb6cf | []
| no_license | Asky-M/dscnf-06 | cb5cd4a402938bcc53723dbc7bbf4e95b548c0f0 | cbe021d568c94b14929759e905592a11cefc7626 | refs/heads/master | 2023-03-27T03:28:05.390087 | 2021-04-03T06:30:51 | 2021-04-03T06:30:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # encode/decode base64 with codecs
import codecs
s = b"Satria Ady Pradana"
t = codecs.encode(s, "base64")
print(t)
u = codecs.decode(t, "base64")
print(u)
# encode/decode base64 with base64
import base64
s = b"Satria Ady Pradana"
t = base64.b64encode(s)
print(t)
u = base64.b64decode(t)
print(u)
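# URL-safe base64 variant (uses - and _ instead of + and /); added as an extra
# illustration in the same spirit as the examples above.
t = base64.urlsafe_b64encode(s)
print(t)
u = base64.urlsafe_b64decode(t)
print(u)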
# encode/decode hex
import binascii
key = b"\x17U\r\xda'US8\x99c\x80\x97\x83s\x9f\xd3"
print(key)
h = binascii.hexlify(key)
print(h)
u = binascii.unhexlify(h)
print(u) | [
"[email protected]"
]
| |
72abe96e55888b2f816aee1fbf0a969dc95b4989 | ba602dc67ad7bb50133aeb312f3c6c54627b3dec | /data/3919/AC_py/508160.py | e46139c365eab6e0455d028613d93c2cc248b297 | []
| no_license | Dearyyyyy/TCG | 0d21d89275906157372d775f33309ce337e6bc95 | 7b80de16de2d3f5d95a7c4ed95d45a9e38882e67 | refs/heads/master | 2020-12-27T23:19:44.845918 | 2020-02-04T01:59:23 | 2020-02-04T01:59:23 | 238,101,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # coding=utf-8
import sys
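# (Note added for readability; the original file had no comments.)
# Classifies a triangle with sides a, b, c. The output labels appear to be
# pinyin abbreviations: DB = dengbian (equilateral), DY = dengyao (isosceles),
# ZJ = zhijiao (right triangle), PT = putong (ordinary), ERROR = not a triangle.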
while True:
a,b,c=map(int,input().split())
if(a+b>c and a+c>b and b+c>a):
if a==b and b==c:
print("DB")
else:
            if a==b or b==c or a==c: # added the a==c case so all isosceles triangles are caught
print('DY')
else:
if b*b+c*c==a*a or a*a+c*c==b*b or a*a+b*b==c*c:
print('ZJ')
else:
print("PT")
else:
print("ERROR") | [
"[email protected]"
]
| |
988e4a4ea56c347a5b6641d6283315ce241c7248 | 884a128552b5f7e698194de22e4a8b4fd43e5db6 | /setup.py | f89439a6f5392e8b10d0a0504621730a8a2d2e6d | [
"Apache-2.0"
]
| permissive | naivenlp/naivenlp-legacy | b3057bdeb54bc54b1df3de8fd3eb5a1af909690b | dbe0d6ac3b422618fe41a763c256077b27f75347 | refs/heads/master | 2022-11-25T11:48:18.411506 | 2020-07-22T08:01:25 | 2020-07-22T08:01:25 | 269,890,191 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | import os
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="naivenlp",
version="0.0.9",
description="NLP toolkit, including tokenization, sequence tagging, etc.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/luozhouyang/naivenlp",
author="ZhouYang Luo",
author_email="[email protected]",
packages=setuptools.find_packages(),
# include_package_data=True,
package_data={
},
install_requires=[
"jieba",
"numpy",
"strsimpy",
"fake_useragent",
"requests",
],
dependency_links=[
],
extras_require={
"tf": ["tensorflow>=2.2.0"]
},
license="Apache Software License",
classifiers=(
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
)
)
| [
"[email protected]"
]
| |
8353cc7bb8452e9ef9ae1467ef3f8ec6c9d9f34e | 2a24dba82767419cf7d2269875bf0a297f41580c | /vispy/app/backends/_pyside.py | 00949a4951a577f056a9c62b2874f44b3055f725 | [
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
]
| permissive | shjoshi/vispy | 58b300d23486b7478b786977b3548dd7225de847 | 2f3d169aa60c738467e766c59096f51570483d6f | refs/heads/master | 2020-12-25T12:40:36.545768 | 2014-08-06T22:59:35 | 2014-08-06T22:59:35 | 22,704,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
""" PySide proxy backend for the qt backend.
"""
import sys
from .. import backends
try:
# Try importing
from PySide import QtGui, QtCore, QtOpenGL # noqa
except Exception as exp:
# Fail: this backend cannot be used
available, testable, why_not, which = False, False, str(exp), None
else:
# Success
available, testable, why_not = True, True, None
has_uic = False
import PySide
which = ('PySide', PySide.__version__, QtCore.__version__)
# Remove _qt module to force an import even if it was already imported
sys.modules.pop(__name__.replace('_pyside', '_qt'), None)
# Import _qt. Keep a ref to the module object!
backends.qt_lib = 'pyside' # Signal to _qt what it should import
from . import _qt # noqa
from ._qt import * # noqa
| [
"[email protected]"
]
| |
b528bfb1f19e47d8bd43f86d689eaa069c15b2e7 | ac34cad5e20b8f46c0b0aa67df829f55ed90dcb6 | /src/assets/ba_data/python/baclassic/_store.py | 8caf49500330f885ef7fe97f84c4efa66d039832 | [
"MIT"
]
| permissive | sudo-logic/ballistica | fd3bf54a043717f874b71f4b2ccd551d61c65008 | 9aa73cd20941655e96b0e626017a7395ccb40062 | refs/heads/master | 2023-07-26T19:52:06.113981 | 2023-07-12T21:32:56 | 2023-07-12T21:37:46 | 262,056,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,317 | py | # Released under the MIT License. See LICENSE for details.
#
"""Store related functionality for classic mode."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
import babase
import bascenev1
if TYPE_CHECKING:
from typing import Any
class StoreSubsystem:
"""Wrangles classic store."""
def get_store_item(self, item: str) -> dict[str, Any]:
"""(internal)"""
return self.get_store_items()[item]
def get_store_item_name_translated(self, item_name: str) -> babase.Lstr:
"""Return a babase.Lstr for a store item name."""
# pylint: disable=cyclic-import
item_info = self.get_store_item(item_name)
if item_name.startswith('characters.'):
return babase.Lstr(
translate=('characterNames', item_info['character'])
)
if item_name in ['merch']:
return babase.Lstr(resource='merchText')
if item_name in ['upgrades.pro', 'pro']:
return babase.Lstr(
resource='store.bombSquadProNameText',
subs=[('${APP_NAME}', babase.Lstr(resource='titleText'))],
)
if item_name.startswith('maps.'):
map_type: type[bascenev1.Map] = item_info['map_type']
return bascenev1.get_map_display_string(map_type.name)
if item_name.startswith('games.'):
gametype: type[bascenev1.GameActivity] = item_info['gametype']
return gametype.get_display_string()
if item_name.startswith('icons.'):
return babase.Lstr(resource='editProfileWindow.iconText')
raise ValueError('unrecognized item: ' + item_name)
def get_store_item_display_size(
self, item_name: str
) -> tuple[float, float]:
"""(internal)"""
if item_name.startswith('characters.'):
return 340 * 0.6, 430 * 0.6
if item_name in ['pro', 'upgrades.pro', 'merch']:
assert babase.app.classic is not None
return 650 * 0.9, 500 * (
0.72
if (
babase.app.config.get('Merch Link')
and babase.app.ui_v1.uiscale is babase.UIScale.SMALL
)
else 0.85
)
if item_name.startswith('maps.'):
return 510 * 0.6, 450 * 0.6
if item_name.startswith('icons.'):
return 265 * 0.6, 250 * 0.6
return 450 * 0.6, 450 * 0.6
def get_store_items(self) -> dict[str, dict]:
"""Returns info about purchasable items.
(internal)
"""
# pylint: disable=cyclic-import
from bascenev1lib import maps
assert babase.app.classic is not None
if babase.app.classic.store_items is None:
from bascenev1lib.game import ninjafight
from bascenev1lib.game import meteorshower
from bascenev1lib.game import targetpractice
from bascenev1lib.game import easteregghunt
# IMPORTANT - need to keep this synced with the master server.
# (doing so manually for now)
babase.app.classic.store_items = {
'characters.kronk': {'character': 'Kronk'},
'characters.zoe': {'character': 'Zoe'},
'characters.jackmorgan': {'character': 'Jack Morgan'},
'characters.mel': {'character': 'Mel'},
'characters.snakeshadow': {'character': 'Snake Shadow'},
'characters.bones': {'character': 'Bones'},
'characters.bernard': {
'character': 'Bernard',
'highlight': (0.6, 0.5, 0.8),
},
'characters.pixie': {'character': 'Pixel'},
'characters.wizard': {'character': 'Grumbledorf'},
'characters.frosty': {'character': 'Frosty'},
'characters.pascal': {'character': 'Pascal'},
'characters.cyborg': {'character': 'B-9000'},
'characters.agent': {'character': 'Agent Johnson'},
'characters.taobaomascot': {'character': 'Taobao Mascot'},
'characters.santa': {'character': 'Santa Claus'},
'characters.bunny': {'character': 'Easter Bunny'},
'merch': {},
'pro': {},
'maps.lake_frigid': {'map_type': maps.LakeFrigid},
'games.ninja_fight': {
'gametype': ninjafight.NinjaFightGame,
'previewTex': 'courtyardPreview',
},
'games.meteor_shower': {
'gametype': meteorshower.MeteorShowerGame,
'previewTex': 'rampagePreview',
},
'games.target_practice': {
'gametype': targetpractice.TargetPracticeGame,
'previewTex': 'doomShroomPreview',
},
'games.easter_egg_hunt': {
'gametype': easteregghunt.EasterEggHuntGame,
'previewTex': 'towerDPreview',
},
'icons.flag_us': {
'icon': babase.charstr(
babase.SpecialChar.FLAG_UNITED_STATES
)
},
'icons.flag_mexico': {
'icon': babase.charstr(babase.SpecialChar.FLAG_MEXICO)
},
'icons.flag_germany': {
'icon': babase.charstr(babase.SpecialChar.FLAG_GERMANY)
},
'icons.flag_brazil': {
'icon': babase.charstr(babase.SpecialChar.FLAG_BRAZIL)
},
'icons.flag_russia': {
'icon': babase.charstr(babase.SpecialChar.FLAG_RUSSIA)
},
'icons.flag_china': {
'icon': babase.charstr(babase.SpecialChar.FLAG_CHINA)
},
'icons.flag_uk': {
'icon': babase.charstr(
babase.SpecialChar.FLAG_UNITED_KINGDOM
)
},
'icons.flag_canada': {
'icon': babase.charstr(babase.SpecialChar.FLAG_CANADA)
},
'icons.flag_india': {
'icon': babase.charstr(babase.SpecialChar.FLAG_INDIA)
},
'icons.flag_japan': {
'icon': babase.charstr(babase.SpecialChar.FLAG_JAPAN)
},
'icons.flag_france': {
'icon': babase.charstr(babase.SpecialChar.FLAG_FRANCE)
},
'icons.flag_indonesia': {
'icon': babase.charstr(babase.SpecialChar.FLAG_INDONESIA)
},
'icons.flag_italy': {
'icon': babase.charstr(babase.SpecialChar.FLAG_ITALY)
},
'icons.flag_south_korea': {
'icon': babase.charstr(babase.SpecialChar.FLAG_SOUTH_KOREA)
},
'icons.flag_netherlands': {
'icon': babase.charstr(babase.SpecialChar.FLAG_NETHERLANDS)
},
'icons.flag_uae': {
'icon': babase.charstr(
babase.SpecialChar.FLAG_UNITED_ARAB_EMIRATES
)
},
'icons.flag_qatar': {
'icon': babase.charstr(babase.SpecialChar.FLAG_QATAR)
},
'icons.flag_egypt': {
'icon': babase.charstr(babase.SpecialChar.FLAG_EGYPT)
},
'icons.flag_kuwait': {
'icon': babase.charstr(babase.SpecialChar.FLAG_KUWAIT)
},
'icons.flag_algeria': {
'icon': babase.charstr(babase.SpecialChar.FLAG_ALGERIA)
},
'icons.flag_saudi_arabia': {
'icon': babase.charstr(babase.SpecialChar.FLAG_SAUDI_ARABIA)
},
'icons.flag_malaysia': {
'icon': babase.charstr(babase.SpecialChar.FLAG_MALAYSIA)
},
'icons.flag_czech_republic': {
'icon': babase.charstr(
babase.SpecialChar.FLAG_CZECH_REPUBLIC
)
},
'icons.flag_australia': {
'icon': babase.charstr(babase.SpecialChar.FLAG_AUSTRALIA)
},
'icons.flag_singapore': {
'icon': babase.charstr(babase.SpecialChar.FLAG_SINGAPORE)
},
'icons.flag_iran': {
'icon': babase.charstr(babase.SpecialChar.FLAG_IRAN)
},
'icons.flag_poland': {
'icon': babase.charstr(babase.SpecialChar.FLAG_POLAND)
},
'icons.flag_argentina': {
'icon': babase.charstr(babase.SpecialChar.FLAG_ARGENTINA)
},
'icons.flag_philippines': {
'icon': babase.charstr(babase.SpecialChar.FLAG_PHILIPPINES)
},
'icons.flag_chile': {
'icon': babase.charstr(babase.SpecialChar.FLAG_CHILE)
},
'icons.fedora': {
'icon': babase.charstr(babase.SpecialChar.FEDORA)
},
'icons.hal': {'icon': babase.charstr(babase.SpecialChar.HAL)},
'icons.crown': {
'icon': babase.charstr(babase.SpecialChar.CROWN)
},
'icons.yinyang': {
'icon': babase.charstr(babase.SpecialChar.YIN_YANG)
},
'icons.eyeball': {
'icon': babase.charstr(babase.SpecialChar.EYE_BALL)
},
'icons.skull': {
'icon': babase.charstr(babase.SpecialChar.SKULL)
},
'icons.heart': {
'icon': babase.charstr(babase.SpecialChar.HEART)
},
'icons.dragon': {
'icon': babase.charstr(babase.SpecialChar.DRAGON)
},
'icons.helmet': {
'icon': babase.charstr(babase.SpecialChar.HELMET)
},
'icons.mushroom': {
'icon': babase.charstr(babase.SpecialChar.MUSHROOM)
},
'icons.ninja_star': {
'icon': babase.charstr(babase.SpecialChar.NINJA_STAR)
},
'icons.viking_helmet': {
'icon': babase.charstr(babase.SpecialChar.VIKING_HELMET)
},
'icons.moon': {'icon': babase.charstr(babase.SpecialChar.MOON)},
'icons.spider': {
'icon': babase.charstr(babase.SpecialChar.SPIDER)
},
'icons.fireball': {
'icon': babase.charstr(babase.SpecialChar.FIREBALL)
},
'icons.mikirog': {
'icon': babase.charstr(babase.SpecialChar.MIKIROG)
},
}
return babase.app.classic.store_items
def get_store_layout(self) -> dict[str, list[dict[str, Any]]]:
"""Return what's available in the store at a given time.
Categorized by tab and by section.
"""
plus = babase.app.plus
classic = babase.app.classic
assert classic is not None
assert plus is not None
if classic.store_layout is None:
classic.store_layout = {
'characters': [{'items': []}],
'extras': [{'items': ['pro']}],
'maps': [{'items': ['maps.lake_frigid']}],
'minigames': [],
'icons': [
{
'items': [
'icons.mushroom',
'icons.heart',
'icons.eyeball',
'icons.yinyang',
'icons.hal',
'icons.flag_us',
'icons.flag_mexico',
'icons.flag_germany',
'icons.flag_brazil',
'icons.flag_russia',
'icons.flag_china',
'icons.flag_uk',
'icons.flag_canada',
'icons.flag_india',
'icons.flag_japan',
'icons.flag_france',
'icons.flag_indonesia',
'icons.flag_italy',
'icons.flag_south_korea',
'icons.flag_netherlands',
'icons.flag_uae',
'icons.flag_qatar',
'icons.flag_egypt',
'icons.flag_kuwait',
'icons.flag_algeria',
'icons.flag_saudi_arabia',
'icons.flag_malaysia',
'icons.flag_czech_republic',
'icons.flag_australia',
'icons.flag_singapore',
'icons.flag_iran',
'icons.flag_poland',
'icons.flag_argentina',
'icons.flag_philippines',
'icons.flag_chile',
'icons.moon',
'icons.fedora',
'icons.spider',
'icons.ninja_star',
'icons.skull',
'icons.dragon',
'icons.viking_helmet',
'icons.fireball',
'icons.helmet',
'icons.crown',
]
}
],
}
store_layout = classic.store_layout
store_layout['characters'] = [
{
'items': [
'characters.kronk',
'characters.zoe',
'characters.jackmorgan',
'characters.mel',
'characters.snakeshadow',
'characters.bones',
'characters.bernard',
'characters.agent',
'characters.frosty',
'characters.pascal',
'characters.pixie',
]
}
]
store_layout['minigames'] = [
{
'items': [
'games.ninja_fight',
'games.meteor_shower',
'games.target_practice',
]
}
]
if plus.get_v1_account_misc_read_val('xmas', False):
store_layout['characters'][0]['items'].append('characters.santa')
store_layout['characters'][0]['items'].append('characters.wizard')
store_layout['characters'][0]['items'].append('characters.cyborg')
if plus.get_v1_account_misc_read_val('easter', False):
store_layout['characters'].append(
{
'title': 'store.holidaySpecialText',
'items': ['characters.bunny'],
}
)
store_layout['minigames'].append(
{
'title': 'store.holidaySpecialText',
'items': ['games.easter_egg_hunt'],
}
)
# This will cause merch to show only if the master-server has
# given us a link (which means merch is available in our region).
store_layout['extras'] = [{'items': ['pro']}]
if babase.app.config.get('Merch Link'):
store_layout['extras'][0]['items'].append('merch')
return store_layout
def get_clean_price(self, price_string: str) -> str:
"""(internal)"""
# I'm not brave enough to try and do any numerical
# manipulation on formatted price strings, but lets do a
# few swap-outs to tidy things up a bit.
psubs = {
'$2.99': '$3.00',
'$4.99': '$5.00',
'$9.99': '$10.00',
'$19.99': '$20.00',
'$49.99': '$50.00',
}
return psubs.get(price_string, price_string)
def get_available_purchase_count(self, tab: str | None = None) -> int:
"""(internal)"""
plus = babase.app.plus
if plus is None:
return 0
try:
if plus.get_v1_account_state() != 'signed_in':
return 0
count = 0
our_tickets = plus.get_v1_account_ticket_count()
store_data = self.get_store_layout()
if tab is not None:
tabs = [(tab, store_data[tab])]
else:
tabs = list(store_data.items())
for tab_name, tabval in tabs:
if tab_name == 'icons':
continue # too many of these; don't show..
count = self._calc_count_for_tab(tabval, our_tickets, count)
return count
except Exception:
logging.exception('Error calcing available purchases.')
return 0
def _calc_count_for_tab(
self, tabval: list[dict[str, Any]], our_tickets: int, count: int
) -> int:
plus = babase.app.plus
assert plus
for section in tabval:
for item in section['items']:
ticket_cost = plus.get_v1_account_misc_read_val(
'price.' + item, None
)
if ticket_cost is not None:
if our_tickets >= ticket_cost and not plus.get_purchased(
item
):
count += 1
return count
def get_available_sale_time(self, tab: str) -> int | None:
"""(internal)"""
# pylint: disable=too-many-branches
# pylint: disable=too-many-nested-blocks
plus = babase.app.plus
assert plus is not None
try:
import datetime
app = babase.app
assert app.classic is not None
sale_times: list[int | None] = []
# Calc time for our pro sale (old special case).
if tab == 'extras':
config = app.config
if app.classic.accounts.have_pro():
return None
# If we haven't calced/loaded start times yet.
if app.classic.pro_sale_start_time is None:
# If we've got a time-remaining in our config, start there.
if 'PSTR' in config:
app.classic.pro_sale_start_time = int(
babase.apptime() * 1000
)
app.classic.pro_sale_start_val = config['PSTR']
else:
# We start the timer once we get the duration from
# the server.
start_duration = plus.get_v1_account_misc_read_val(
'proSaleDurationMinutes', None
)
if start_duration is not None:
app.classic.pro_sale_start_time = int(
babase.apptime() * 1000
)
app.classic.pro_sale_start_val = (
60000 * start_duration
)
# If we haven't heard from the server yet, no sale..
else:
return None
assert app.classic.pro_sale_start_val is not None
val: int | None = max(
0,
app.classic.pro_sale_start_val
- (
int(babase.apptime() * 1000.0)
- app.classic.pro_sale_start_time
),
)
# Keep the value in the config up to date. I suppose we should
# write the config occasionally but it should happen often
# enough for other reasons.
config['PSTR'] = val
if val == 0:
val = None
sale_times.append(val)
# Now look for sales in this tab.
sales_raw = plus.get_v1_account_misc_read_val('sales', {})
store_layout = self.get_store_layout()
for section in store_layout[tab]:
for item in section['items']:
if item in sales_raw:
if not plus.get_purchased(item):
to_end = (
datetime.datetime.utcfromtimestamp(
sales_raw[item]['e']
)
- datetime.datetime.utcnow()
).total_seconds()
if to_end > 0:
sale_times.append(int(to_end * 1000))
# Return the smallest time I guess?
sale_times_int = [t for t in sale_times if isinstance(t, int)]
return min(sale_times_int) if sale_times_int else None
except Exception:
logging.exception('Error calcing sale time.')
return None
def get_unowned_maps(self) -> list[str]:
"""Return the list of local maps not owned by the current account.
Category: **Asset Functions**
"""
plus = babase.app.plus
unowned_maps: set[str] = set()
if not babase.app.headless_mode:
for map_section in self.get_store_layout()['maps']:
for mapitem in map_section['items']:
if plus is None or not plus.get_purchased(mapitem):
m_info = self.get_store_item(mapitem)
unowned_maps.add(m_info['map_type'].name)
return sorted(unowned_maps)
def get_unowned_game_types(self) -> set[type[bascenev1.GameActivity]]:
"""Return present game types not owned by the current account."""
try:
plus = babase.app.plus
unowned_games: set[type[bascenev1.GameActivity]] = set()
if not babase.app.headless_mode:
for section in self.get_store_layout()['minigames']:
for mname in section['items']:
if plus is None or not plus.get_purchased(mname):
m_info = self.get_store_item(mname)
unowned_games.add(m_info['gametype'])
return unowned_games
except Exception:
logging.exception('Error calcing un-owned games.')
return set()
| [
"[email protected]"
]
| |
d33c3d0bc0f509999dd2ca1132e50bf6291c76f8 | 4ebdc7053d9341ce7ad45f1e859ff86ef1455177 | /52_global.py | e2b5ca2d678a6f222101e9a4cc221547301aa89d | []
| no_license | daikiante/python | 1f4d55e1fd04eef22702b364148b8e1a2beea2d3 | 9d604b8dcd9e3cbe8b4db24ef16c5c969f6f894f | refs/heads/master | 2020-09-17T00:14:24.034179 | 2019-12-02T09:03:25 | 2019-12-02T09:03:25 | 223,928,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # global
'''
A global variable cannot be reassigned directly inside a def function (local scope).
To rebind a global variable there, declare it with the global keyword inside the local scope first.
'''
name = 'lohit'
age = 20
def person():
global age
print('my age is',age)
per = 'lohit badiger'
print('my name is',per)
person()
print('global name are',name)
print('global age is',age)
# Example
num1 = 10
num2 = 3000
def sum():
global num1
print(num1 + num2)
sum()
print(num1 + num2) | [
"[email protected]"
]
| |
b113e7e6e71c42480977c18e82a7bf4d3ecbfc8a | 2e10314f0a6a32cbfdce6b80c7767b84de421741 | /精品真题/精品-one.py | e2135999ef9f92009ca10a79d4df38384cd13fdb | []
| no_license | tang1323/Ing_Interview | 06a9cb19c932b2852dd55655b0d46b814ffa9095 | a1068d3739d2088a2edcf8314e18659e0e9003f8 | refs/heads/master | 2023-04-06T14:17:37.757618 | 2021-04-14T14:14:01 | 2021-04-14T14:14:01 | 357,929,558 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py |
# def add_Run(L=None):
# if L is None:
# L = []
# L.append('Run')
# return L
# add_Run()
# add_Run()
# print(add_Run(['Lying']))
# ds = {'av':2, 'vr':4, 'ls':9, 'path':6}
# print(ds.popitem(), len(ds))
# with open('D:/Py-Project/Ing_Interview/精品真题/txt/a', 'r') as f:
# print(f.read().split(','))
# aaa = [8, 5, 2, 2]
# with open('D:/Py-Project/Ing_Interview/精品真题/txt/output', 'w') as f:
# for aa in aaa:
# f.write(';'.join.str(aa))
# x, y = 1, 2
# while x < 20:
# x, y = y, x + y
# print(x)
# ls = [2, 0, 6]
# x = 100
# try:
# for i in ls:
# y = 100 // i
# print(y)
# except:
# print('error')
# import random as r
# zmb = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz'
# r.seed(1)
# code = ''
# for i in range(4):
# code += r.choice(zmb)
# print(code)
# import turtle as t
#
# color = ['red','pink','green']
# ra = [20, 50, 100]
# for i in range(3):
# t.pu()
# t.goto(0, -ra[i])
# t.pd()
# t.pencolor(color[i])
# t.circle(ra[i])
# t.done()
| [
"[email protected]"
]
| |
737ec987dfe8f44ec60ce95839fb21130c803793 | 2a1a175efc9c482db0e6d96569f92b9583990acc | /eventex/subscriptions/tests/test_view_new.py | 351daeb6ab3b8abda88f2861141510e7c1378d8c | []
| no_license | mazulo/wttd_eventex | 2e97e3724f2b8396b8cc73175d15defd09b4a86b | 691008562d2143cc57c8b4bb5042aa2c1fdc6602 | refs/heads/master | 2021-01-10T07:29:20.343157 | 2016-03-16T18:21:10 | 2016-03-16T18:21:10 | 48,304,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,017 | py | from django.core import mail
from django.test import TestCase
from django.shortcuts import resolve_url as r
from eventex.subscriptions.forms import SubscriptionForm
from eventex.subscriptions.models import Subscription
class SubscriptionsNewGet(TestCase):
def setUp(self):
self.resp = self.client.get(r('subscriptions:new'))
def test_get(self):
"""GET /inscricao/ must return status code 200"""
self.assertEqual(200, self.resp.status_code)
def test_template(self):
"""Must use subscriptions/subscription_form.html"""
self.assertTemplateUsed(
self.resp,
'subscriptions/subscription_form.html'
)
def test_html(self):
"""Html must contain input tags"""
tags = (
('<form', 1),
('<input', 6),
('type="text"', 3),
('type="email"', 1),
('type="submit"', 1),
)
for text, count in tags:
with self.subTest():
self.assertContains(self.resp, text, count)
def test_csrf(self):
"""Html must contain csrf"""
self.assertContains(self.resp, 'csrfmiddlewaretoken')
def test_has_form(self):
"""Context must have subscription form"""
form = self.resp.context['form']
self.assertIsInstance(form, SubscriptionForm)
class SubscriptionsNewPost(TestCase):
def setUp(self):
data = dict(name='Patrick Mazulo', cpf='03286218383',
email='[email protected]', phone='86-99988-7848')
self.resp = self.client.post(r('subscriptions:new'), data)
def test_post(self):
"""Valid POST should redirect to /inscricao/1/"""
self.assertRedirects(self.resp, r('subscriptions:detail', 1))
def test_send_subscribe(self):
self.assertEqual(1, len(mail.outbox))
def test_save_subscription(self):
self.assertTrue(Subscription.objects.exists())
class SubscriptionsNewPostInvalid(TestCase):
def setUp(self):
self.resp = self.client.post(r('subscriptions:new'), {})
def test_post(self):
"""Invalid POST should not redirect"""
self.assertEqual(200, self.resp.status_code)
def test_template(self):
self.assertTemplateUsed(self.resp,
'subscriptions/subscription_form.html')
def test_has_form(self):
form = self.resp.context['form']
self.assertIsInstance(form, SubscriptionForm)
def test_form_has_errors(self):
form = self.resp.context['form']
self.assertTrue(form.errors)
def test_dont_save_subscription(self):
self.assertFalse(Subscription.objects.exists())
class TestTemplateRegressionTest(TestCase):
def test_template_has_non_field_errors(self):
invalid_data = dict(name='Patrick Mazulo', cpf='03286218383')
response = self.client.post(r('subscriptions:new'), invalid_data)
self.assertContains(response, '<ul class="errorlist nonfield">')
| [
"[email protected]"
]
| |
327bf3ff951ee285a77e0a2dfa30a0a852ac1426 | cceb97ce3d74ac17090786bc65f7ed30e37ad929 | /server/newfirst/migrations/0005_auto_20201024_0316.py | baaa7f017786874e8c0a9b6e7a9c50db448d3ef2 | []
| no_license | Catxiaobai/project | b47310efe498421cde794e289b4e753d843c8e40 | 76e346f69261433ccd146a3cbfa92b4e3864d916 | refs/heads/master | 2023-01-08T04:37:59.232492 | 2020-11-10T12:00:34 | 2020-11-10T12:00:34 | 291,014,545 | 1 | 4 | null | 2020-11-09T01:22:11 | 2020-08-28T10:08:16 | Python | UTF-8 | Python | false | false | 424 | py | # Generated by Django 3.1.1 on 2020-10-23 19:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('newfirst', '0004_scenes'),
]
operations = [
migrations.RemoveField(
model_name='item',
name='item_date',
),
migrations.RemoveField(
model_name='item',
name='item_leader',
),
]
| [
"[email protected]"
]
| |
91db8116494945ac4447f2c14fec8b83a4d5f470 | 66d184a2b36ab1db564305ea36be891aaf0e236b | /py/Python_Crash_Course/project2/two_d8.py | 52743e7fbb2329663e1615be5f979d1fb0082ff0 | []
| no_license | joyDDT/python_code | bef57936a1167fa65e28b6c52ab7857b34dc74a8 | 3aae56c51660579a4eaaa087ac2459c9bf2f2e23 | refs/heads/master | 2021-10-30T10:22:21.328633 | 2019-04-26T04:45:01 | 2019-04-26T04:45:01 | 112,004,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | import pygal
from die import Die
# Create two D8 dice
die_1 = Die(8)
die_2 = Die(8)
# Roll the dice many times and store the results in a list
results = [ ]
for roll_num in range(1000):
result = die_1.roll( ) + die_2.roll( )
results.append(result)
# Analyze the results
frequencies = [ ]
max_num = die_1.num_sides + die_2.num_sides
for value in range(2, max_num+1):
frequency = results.count(value)
frequencies.append(frequency)
# Visualize the results
hist = pygal.Bar( )
hist.title = 'Results of rolling two D8 1000 times.'
hist.x_labels = [x for x in range(2, max_num+1)]
hist.x_title = 'Result'
hist.y_title = 'Frequency of Result'
hist.add('D8+D8', frequencies)
hist.render_to_file('two_d8.svg')
| [
"[email protected]"
]
| |
dd481a8700e475bd2c82b82241d3ad689f39f95f | 56b60cb4e3dfa065839ce0dce5a50e163a4f9f3a | /api_part/__init__.py | fe26d15ead612aa115e93e6bb3c25e8b71983fcf | []
| no_license | Humbertzhang/DocTrans | 8acdd6634361130cb4f0d960baabd2a28de07332 | 242c0efbdbb660325df0de33910449566148bdb5 | refs/heads/master | 2021-01-20T05:13:58.521265 | 2017-08-31T08:07:11 | 2017-08-31T08:07:11 | 101,422,930 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | from ._init_ import __init__content
from .static import static_content | [
"[email protected]"
]
| |
05f3cbf560213a1004237cd81daf92637628f3b9 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/panpanpandas_ultrafinance/ultrafinance-master/deprecated/example/usTradingStrategy.py | ed1e6d309c7080a44956603b6c80661f73986aa1 | []
| no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 216 | py | '''
Created on July 17, 2010
@author: ppa
'''
from ultrafinance.processChain.processChain import runProcessChain
if __name__ == '__main__':
configFile1 = 'usTradingStrategy.ini'
runProcessChain(configFile1) | [
"[email protected]"
]
| |
819c11fb2ff6e9bbda0cb03380c26525458095b7 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.5_rd=0.8_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=35/sched.py | a3b742d5b6b87242902b200cb99b1c50add5a6e7 | []
| no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | -X FMLP -Q 0 -L 3 120 400
-X FMLP -Q 0 -L 3 93 400
-X FMLP -Q 0 -L 3 80 400
-X FMLP -Q 1 -L 2 73 400
-X FMLP -Q 1 -L 2 63 250
-X FMLP -Q 2 -L 1 55 200
-X FMLP -Q 2 -L 1 45 400
-X FMLP -Q 3 -L 1 35 125
-X FMLP -Q 3 -L 1 35 150
22 100
21 100
| [
"[email protected]"
]
| |
8799587af23d45f613e72c763c6650d93bba8f46 | ecee6e84ba18100b621c7e06f493ae48e44a34fe | /devel/lib/python2.7/dist-packages/costmap_2d/cfg/VoxelPluginConfig.py | ca382f3ac25f62dda8cec625bd10f2ab8217430e | []
| no_license | theleastinterestingcoder/Thesis | 6d59e06b16cbe1588a6454689248c88867de2094 | 3f6945f03a58f0eff105fe879401a7f1df6f0166 | refs/heads/master | 2016-09-05T15:30:26.501946 | 2015-05-11T14:34:15 | 2015-05-11T14:34:15 | 31,631,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,597 | py | ## *********************************************************
##
## File autogenerated for the costmap_2d package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 233, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 262, 'description': 'Whether to use this plugin or not', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'enabled', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 262, 'description': 'Max Obstacle Height', 'max': 50.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_obstacle_height', 'edit_method': '', 'default': 2.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 262, 'description': 'The z origin of the map in meters.', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'origin_z', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 262, 'description': 'The z resolution of the map in meters/cell.', 'max': 50.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'z_resolution', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 262, 'description': 'The number of voxels to in each vertical column.', 'max': 16, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'z_voxels', 'edit_method': '', 'default': 10, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 262, 'description': 'The number of unknown cells allowed in a column considered to be known', 'max': 16, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'unknown_threshold', 'edit_method': '', 'default': 15, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 262, 'description': 'The maximum number of marked cells allowed in a column considered to be free', 'max': 16, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'mark_threshold', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 262, 'description': 'Method for combining two layers', 'max': 2, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'combination_method', 'edit_method': "{'enum_description': 'Method for combining layers enum', 'enum': [{'srcline': 15, 'description': 'b', 'srcfile': '/home/alfred/quan_ws/src/navigation/costmap_2d/cfg/VoxelPlugin.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Overwrite'}, {'srcline': 16, 'description': 'a', 'srcfile': '/home/alfred/quan_ws/src/navigation/costmap_2d/cfg/VoxelPlugin.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 
'type': 'int', 'name': 'Maximum'}]}", 'default': 1, 'level': 0, 'min': 0, 'type': 'int'}], 'type': '', 'id': 0}
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
# params = []
# params.extend(config['parameters'])
# for group in config['groups']:
# params.extend(extract_params(group))
# return params
for param in extract_params(config_description):
min[param['name']] = param['min']
max[param['name']] = param['max']
defaults[param['name']] = param['default']
level[param['name']] = param['level']
type[param['name']] = param['type']
all_level = all_level | param['level']
VoxelPlugin_Overwrite = 0
VoxelPlugin_Maximum = 1
| [
"[email protected]"
]
| |
917db0f72decd79edcafec2875bee0865c643e64 | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/v1_0/identitydirmgt_v1_0/azext_identitydirmgt_v1_0/vendored_sdks/identitydirmgt/aio/_identity_directory_management.py | 8f6baa89e013ef5e2e83f0e742393afc730fc7cf | [
"MIT"
]
| permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 9,603 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import IdentityDirectoryManagementConfiguration
from .operations import ContactsOrgContactOperations
from .operations import ContactsOperations
from .operations import ContractsContractOperations
from .operations import ContractsOperations
from .operations import DevicesDeviceOperations
from .operations import DevicesOperations
from .operations import DirectoryDirectoryOperations
from .operations import DirectoryOperations
from .operations import DirectoryAdministrativeUnitsOperations
from .operations import DirectoryRolesDirectoryRoleOperations
from .operations import DirectoryRolesOperations
from .operations import DirectoryRoleTemplatesDirectoryRoleTemplateOperations
from .operations import DirectoryRoleTemplatesOperations
from .operations import DomainsDomainOperations
from .operations import DomainsOperations
from .operations import OrganizationOrganizationOperations
from .operations import OrganizationOperations
from .operations import SubscribedSkusSubscribedSkuOperations
from .operations import UsersOperations
from .. import models
class IdentityDirectoryManagement(object):
"""IdentityDirectoryManagement.
:ivar contacts_org_contact: ContactsOrgContactOperations operations
:vartype contacts_org_contact: identity_directory_management.aio.operations.ContactsOrgContactOperations
:ivar contacts: ContactsOperations operations
:vartype contacts: identity_directory_management.aio.operations.ContactsOperations
:ivar contracts_contract: ContractsContractOperations operations
:vartype contracts_contract: identity_directory_management.aio.operations.ContractsContractOperations
:ivar contracts: ContractsOperations operations
:vartype contracts: identity_directory_management.aio.operations.ContractsOperations
:ivar devices_device: DevicesDeviceOperations operations
:vartype devices_device: identity_directory_management.aio.operations.DevicesDeviceOperations
:ivar devices: DevicesOperations operations
:vartype devices: identity_directory_management.aio.operations.DevicesOperations
:ivar directory_directory: DirectoryDirectoryOperations operations
:vartype directory_directory: identity_directory_management.aio.operations.DirectoryDirectoryOperations
:ivar directory: DirectoryOperations operations
:vartype directory: identity_directory_management.aio.operations.DirectoryOperations
:ivar directory_administrative_units: DirectoryAdministrativeUnitsOperations operations
:vartype directory_administrative_units: identity_directory_management.aio.operations.DirectoryAdministrativeUnitsOperations
:ivar directory_roles_directory_role: DirectoryRolesDirectoryRoleOperations operations
:vartype directory_roles_directory_role: identity_directory_management.aio.operations.DirectoryRolesDirectoryRoleOperations
:ivar directory_roles: DirectoryRolesOperations operations
:vartype directory_roles: identity_directory_management.aio.operations.DirectoryRolesOperations
:ivar directory_role_templates_directory_role_template: DirectoryRoleTemplatesDirectoryRoleTemplateOperations operations
:vartype directory_role_templates_directory_role_template: identity_directory_management.aio.operations.DirectoryRoleTemplatesDirectoryRoleTemplateOperations
:ivar directory_role_templates: DirectoryRoleTemplatesOperations operations
:vartype directory_role_templates: identity_directory_management.aio.operations.DirectoryRoleTemplatesOperations
:ivar domains_domain: DomainsDomainOperations operations
:vartype domains_domain: identity_directory_management.aio.operations.DomainsDomainOperations
:ivar domains: DomainsOperations operations
:vartype domains: identity_directory_management.aio.operations.DomainsOperations
:ivar organization_organization: OrganizationOrganizationOperations operations
:vartype organization_organization: identity_directory_management.aio.operations.OrganizationOrganizationOperations
:ivar organization: OrganizationOperations operations
:vartype organization: identity_directory_management.aio.operations.OrganizationOperations
:ivar subscribed_skus_subscribed_sku: SubscribedSkusSubscribedSkuOperations operations
:vartype subscribed_skus_subscribed_sku: identity_directory_management.aio.operations.SubscribedSkusSubscribedSkuOperations
:ivar users: UsersOperations operations
:vartype users: identity_directory_management.aio.operations.UsersOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param top: Show only the first n items.
:type top: int
:param skip: Skip the first n items.
:type skip: int
:param search: Search items by search phrases.
:type search: str
:param filter: Filter items by property values.
:type filter: str
:param count: Include count of items.
:type count: bool
:param str base_url: Service URL
"""
def __init__(
self,
credential: "AsyncTokenCredential",
top: Optional[int] = None,
skip: Optional[int] = None,
search: Optional[str] = None,
filter: Optional[str] = None,
count: Optional[bool] = None,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://graph.microsoft.com/v1.0'
self._config = IdentityDirectoryManagementConfiguration(credential, top, skip, search, filter, count, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.contacts_org_contact = ContactsOrgContactOperations(
self._client, self._config, self._serialize, self._deserialize)
self.contacts = ContactsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.contracts_contract = ContractsContractOperations(
self._client, self._config, self._serialize, self._deserialize)
self.contracts = ContractsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.devices_device = DevicesDeviceOperations(
self._client, self._config, self._serialize, self._deserialize)
self.devices = DevicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.directory_directory = DirectoryDirectoryOperations(
self._client, self._config, self._serialize, self._deserialize)
self.directory = DirectoryOperations(
self._client, self._config, self._serialize, self._deserialize)
self.directory_administrative_units = DirectoryAdministrativeUnitsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.directory_roles_directory_role = DirectoryRolesDirectoryRoleOperations(
self._client, self._config, self._serialize, self._deserialize)
self.directory_roles = DirectoryRolesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.directory_role_templates_directory_role_template = DirectoryRoleTemplatesDirectoryRoleTemplateOperations(
self._client, self._config, self._serialize, self._deserialize)
self.directory_role_templates = DirectoryRoleTemplatesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.domains_domain = DomainsDomainOperations(
self._client, self._config, self._serialize, self._deserialize)
self.domains = DomainsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.organization_organization = OrganizationOrganizationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.organization = OrganizationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.subscribed_skus_subscribed_sku = SubscribedSkusSubscribedSkuOperations(
self._client, self._config, self._serialize, self._deserialize)
self.users = UsersOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "IdentityDirectoryManagement":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| [
"[email protected]"
]
| |
e330b2c4ec0d709e389aa70c7b230a248e40cdff | 500b03fa6cb776c1d51db4a3a3aa252ddf5a50e6 | /book_exercise/py_intro/basics/Chapter 3: Numbers/name_random.py | f71fc4d460bda5325b87858fc9109a256951f46c | []
| no_license | carloslvm/learning-python | b3796a0a5b751baae8c551a9f6fe262f98980691 | 07f885454cf21b7d215a58da7fcb907715e546bd | refs/heads/master | 2022-07-27T21:39:11.937801 | 2022-07-09T17:47:56 | 2022-07-09T17:47:56 | 163,447,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | #!/usr/bin/python3
# Print the name a random number of times (between 1 and 10).
from random import randint
for _ in range(randint(1, 10)):
name = 'David'
print(name)
| [
"[email protected]"
]
| |
2c3b054d93f45003c87f98cb0129da2c90a07b02 | 5551361c02ee4a78036e2452fea615fc912f406b | /tut4.py | 465358ee61083a2301be6d3e8df1a5bc8be26084 | [
"MIT"
]
| permissive | Demfier/cs116-tut-solns | 3d93752e4ca2d3debbb36c901a13e7201e5bf0fe | 8277dae848ebf66522245fe15492ab91e48dac93 | refs/heads/master | 2020-12-13T13:32:52.551815 | 2020-02-14T05:25:45 | 2020-02-14T05:25:45 | 234,433,306 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,503 | py | # This file contains solutions to CS116, Tutorial 4
import math
import check
# CQ1: E)
def create_cards(values, suits):
"""
Purpose, Contracts & Requirements, and Examples go here
"""
return list(map(lambda x, y: [x, y], values, suits))
def create_cards_alt(values, suits):
"""
Purpose, Contracts & Requirements, and Examples go here
"""
    return list(map(lambda i: [values[i], suits[i]], range(len(values))))
# Tests for create_cards go here
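# e.g. (assumed data) create_cards([4, 'Q'], ['clubs', 'hearts']) -> [[4, 'clubs'], ['Q', 'hearts']]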
def choose_by_color(loC, color): # Abs. list impl. (really unoptimized!!)
"""
Purpose, Contracts & Requirements, and Examples go here
"""
if color == 'red':
lookup_list = ['diamonds', 'hearts']
else:
lookup_list = ['spades', 'clubs']
return list(map(lambda x: x[0], filter(lambda x: x[1] in lookup_list, loC)))
def filter_and_convert(loC, lookup_list, val_list):
if loC == []:
return val_list
if loC[0][1] in lookup_list:
val_list.append(loC[0][0])
return filter_and_convert(loC[1:], lookup_list, val_list)
def choose_by_color(loC, color): # recursive impl.
"""
Purpose, Contracts & Requirements, and Examples go here
"""
if color == 'red':
lookup_list = ['diamonds', 'hearts']
elif color == 'black':
lookup_list = ['spades', 'clubs']
return filter_and_convert(loC, lookup_list, [])
# Tests for choose_by_color go here
def flip_color(c): # fancy impl.
"""
Purpose, Contracts & Requirements, and Examples go here
"""
flip_list_1 = ['hearts', 'spades']
flip_list_2 = ['diamonds', 'clubs']
# new_index = len(flip_list) - index of curr suit in flip_list - 1
if c[1] in flip_list_1:
new_index = 1-flip_list_1.index(c[1])
c[1] = flip_list_1[new_index]
else:
new_index = 1-flip_list_2.index(c[1])
c[1] = flip_list_2[new_index]
def flip_color(c): # bland impl.
"""
Purpose, Contracts & Requirements, and Examples go here
"""
if c[1] == 'spades':
c[1] = 'hearts'
elif c[1] == 'hearts':
c[1] = 'spades'
elif c[1] == 'diamonds':
c[1] = 'clubs'
else:
c[1] = 'diamonds'
# Tests for flip_color go here
def flip_hand_helper(loC, pos):
if pos == len(loC) or loC == []:
return loC
flip_color(loC[pos])
return flip_hand_helper(loC, pos+1)
def flip_hand(loC):
return flip_hand_helper(loC, 0)
# Tests for flip_hand go here
def last_occ_index(list_of_vals, val, pos):
if pos < 0:
return -1
if list_of_vals[pos] == val:
return pos
return last_occ_index(list_of_vals, val, pos-1)
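# modify_list below mutates nums in place: append n if absent; remove the single
# copy if n occurs exactly once; if n occurs two or more times, remove its first
# and last occurrences.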
def modify_list(nums, n):
"""
Purpose, Contracts & Requirements, and Examples go here
"""
if n not in nums:
nums.append(n)
elif nums.count(n) == 1:
nums.remove(n)
elif nums.count(n) >= 2:
nums.remove(n)
nums.pop(last_occ_index(nums, n, len(nums) - 1))
# Tests for modify_list go here
def sanitize(s):
"""
Purpose, Contracts & Requirements, and Examples go here
"""
return ''.join(list(filter(lambda c: c.isalnum(), s)))
# Tests for sanitize go here
def reversed_list(L):
"""
Purpose, Contracts & Requirements, and Examples go here
"""
return list(map(lambda i: L[-(i+1)], range(len(L))))
def reversed_list_alt(L):
"""
Purpose, Contracts & Requirements, and Examples go here
"""
return list(map(L.pop, [-1]*len(L)))
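# Note: the alternative above calls L.pop(-1) len(L) times, so it returns the
# reversed elements but empties the original list L as a side effect.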
# Tests for reversed_list go here
| [
"[email protected]"
]
| |
2cc437f24c473125f7825b073b35dbc910657b40 | 963cac9e78c4b742f7e7800200de8d1582799955 | /lib/veetou/pzh/pzhmodel_.py | fe393d78cc1da6d7aec46d2741a126f14b156e44 | []
| no_license | ptomulik/veetou | c79ceb3ca3d7ef7b261b2219489b6f0a7a83e1fa | b30be2a604f4426f832ec9805547ecd6cc9083fe | refs/heads/master | 2021-01-22T17:28:57.271251 | 2019-01-05T01:46:43 | 2020-05-04T16:23:44 | 85,016,513 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,677 | py | # -*- coding: utf8 -*-
"""`veetou.pzh.pzmodel_`
Defines data model for pzh (Protokół Zaliczeń - HTML)
"""
from veetou.model import *
__all__ = ( 'PzHReport',
'PzHPreamble',
'PzHTr',
'PzHSummary',
'PzHDataModel' )
##def strn(s=None):
## if s is None:
## return None
## else:
## return str(s)
PzHReport = declare( DataType, 'PzhReport',
('source', 'datetime'),
# 5 * (strn,),
plural = 'PzhReports'
)
PzHPreamble = declare( DataType, 'PzhPreamble',
( 'title', 'sheet_id', 'semester_code', 'sheet_serie',
'sheet_number', 'sheet_type', 'sheet_state', 'subj_name',
'subj_department', 'subj_code', 'subj_grade_type', 'subj_tutor',
'return_date', 'approved_by', 'modified_datetime', 'modified_date',
'modified_time', 'return_deadline'),
## 17 * (strn,),
plural = 'PzhPreambles'
)
PzHTr = declare( DataType, 'PzhTr',
( 'tr_ord_no', 'student_name', 'student_index', 'subj_grade',
'subj_grade_final', 'subj_grade_project', 'subj_grade_lecture',
'subj_grade_class', 'subj_grade_lab', 'subj_grade_seminar',
'subj_grade_p', 'subj_grade_n', 'edited_by', 'edited_datetime',
'edited_date', 'edited_time' ),
## 16 * (strn,),
plural = 'PzhTrs'
)
PzHSummary = declare( DataType, 'PzhSummary',
( 'caption', 'th', 'content' ),
## 3 * (strn,),
plural = 'PzhSummaries'
)
class PzHDataModel(DataModel):
_datatypes = ( PzHReport,
PzHPreamble,
PzHTr,
PzHSummary )
def _mk_initial_tables(self):
tables = map( lambda t: (tablename(t), t), map(lambda dt : tableclass(dt)(), self._datatypes))
self.tables.update(tables)
def _mk_initial_relations(self):
strings = ( ( 'pzh_report_preamble', ('pzh_reports', 'pzh_preambles'), ('pzh_preamble', 'pzh_report') ),
( 'pzh_report_trs', ('pzh_reports', 'pzh_trs'), ('pzh_trs', 'pzh_report') ) )#,
#( 'report_summary', ('reports', 'summaries'), ('summary', 'report') ) )
relations = map( lambda x : (x[0],Junction(map(self.tables.__getitem__,x[1]),x[2])), strings )
self.relations.update(relations)
def __init__(self):
super().__init__()
self._mk_initial_tables()
self._mk_initial_relations()
@property
def prefix(self):
return 'pzh_'
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
| [
"[email protected]"
]
| |
02afa1f3ac1912f2e42968b1a9f8c171135d839e | fa795af74cda4d92604fa3332179ba939460a9b5 | /JUBioactivities/QSARDB/Papa_Property_pkNO3_Degradation_by_NO3_radicals_as_logkNO3/__init__.py | ed8bd3df2d2321a9d042e6cc65b02e98c183d8a1 | []
| no_license | JenniferHemmerich/JUBioactivities | 7329a89db0e2790aff9bcfe153ab4dcd2c19a489 | 87054ac135d91e034dcfb6028562b4a7930a3433 | refs/heads/master | 2020-04-26T03:56:36.177955 | 2019-03-07T13:08:08 | 2019-03-07T13:08:08 | 173,284,341 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | import os.path
import glob
import pandas as pd
import numpy as np
from ... import utils
__data_src__ = list(sorted(glob.glob(os.path.join(__path__[0], "compounds/0*.mol"))))
__data_src__ += [os.path.join(__path__[0], "properties/pkNO3.txt")]
def read_data(raw=False):
df = pd.DataFrame({'pkNO3_Index_Papa': np.loadtxt(__data_src__[-1], usecols=1, skiprows=1, delimiter='\t')},
index=__data_src__[:-1])
inchi_index = utils.convert_index(df.index, filenames=True)
df.index = inchi_index
if raw:
return df
df = utils.drop_rows(df)
df = utils.handle_duplicates(df, type='cont')
return df
def read_structures(raw=False):
df = pd.DataFrame(index=__data_src__[:-1])
df = utils.get_smiles_from_index(df, filenames=True)
inchi_index = utils.convert_index(df.index, filenames=True)
df.index = inchi_index
if raw:
return df
df = utils.drop_rows(df)
df = utils.handle_duplicates(df, type='str')
return df | [
"[email protected]"
]
| |
c0d9310f0cd5790e4e0888b620c63cf325dc4d58 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-aom/huaweicloudsdkaom/v2/model/list_log_items_response.py | 4e3d6377a22a133cbef0e053c955f2b4d0817543 | [
"Apache-2.0"
]
| permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 5,115 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListLogItemsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'error_code': 'str',
'error_message': 'str',
'result': 'str'
}
attribute_map = {
'error_code': 'errorCode',
'error_message': 'errorMessage',
'result': 'result'
}
def __init__(self, error_code=None, error_message=None, result=None):
"""ListLogItemsResponse
The model defined in huaweicloud sdk
        :param error_code: Response code; SVCSTG_AMS_2000000 indicates a normal (successful) response.
:type error_code: str
        :param error_message: Description of the response message.
:type error_message: str
        :param result: Metadata of the query result, including the total count and the result items.
:type result: str
"""
super(ListLogItemsResponse, self).__init__()
self._error_code = None
self._error_message = None
self._result = None
self.discriminator = None
if error_code is not None:
self.error_code = error_code
if error_message is not None:
self.error_message = error_message
if result is not None:
self.result = result
@property
def error_code(self):
"""Gets the error_code of this ListLogItemsResponse.
        Response code; SVCSTG_AMS_2000000 indicates a normal (successful) response.
:return: The error_code of this ListLogItemsResponse.
:rtype: str
"""
return self._error_code
@error_code.setter
def error_code(self, error_code):
"""Sets the error_code of this ListLogItemsResponse.
        Response code; SVCSTG_AMS_2000000 indicates a normal (successful) response.
:param error_code: The error_code of this ListLogItemsResponse.
:type error_code: str
"""
self._error_code = error_code
@property
def error_message(self):
"""Gets the error_message of this ListLogItemsResponse.
        Description of the response message.
:return: The error_message of this ListLogItemsResponse.
:rtype: str
"""
return self._error_message
@error_message.setter
def error_message(self, error_message):
"""Sets the error_message of this ListLogItemsResponse.
        Description of the response message.
:param error_message: The error_message of this ListLogItemsResponse.
:type error_message: str
"""
self._error_message = error_message
@property
def result(self):
"""Gets the result of this ListLogItemsResponse.
        Metadata of the query result, including the total count and the result items.
:return: The result of this ListLogItemsResponse.
:rtype: str
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this ListLogItemsResponse.
        Metadata of the query result, including the total count and the result items.
:param result: The result of this ListLogItemsResponse.
:type result: str
"""
self._result = result
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListLogItemsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
fc80dafb1d1d6e219b60357d8bd2a5f407c26ca4 | 228ebc9fb20f25dd3ed2a6959aac41fd31314e64 | /google/cloud/aiplatform_v1beta1/types/index.py | 289ef763b8977f8503af013acbc9cfaa2abd7f63 | [
"Apache-2.0"
]
| permissive | orionnye/python-aiplatform | 746e3df0c75025582af38223829faeb2656dc653 | e3ea683bf754832340853a15bdb0a0662500a70f | refs/heads/main | 2023-08-03T06:14:50.689185 | 2021-09-24T03:24:14 | 2021-09-24T03:24:14 | 410,091,957 | 1 | 0 | Apache-2.0 | 2021-09-24T20:21:01 | 2021-09-24T20:21:00 | null | UTF-8 | Python | false | false | 5,038 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import deployed_index_ref
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1", manifest={"Index",},
)
class Index(proto.Message):
r"""A representation of a collection of database items organized
in a way that allows for approximate nearest neighbor (a.k.a
ANN) algorithms search.
Attributes:
name (str):
Output only. The resource name of the Index.
display_name (str):
Required. The display name of the Index.
The name can be up to 128 characters long and
can be consist of any UTF-8 characters.
description (str):
The description of the Index.
metadata_schema_uri (str):
Immutable. Points to a YAML file stored on Google Cloud
Storage describing additional information about the Index,
that is specific to it. Unset if the Index does not have any
additional information. The schema is defined as an OpenAPI
3.0.2 `Schema
Object <https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject>`__.
Note: The URI given on output will be immutable and probably
different, including the URI scheme, than the one given on
input. The output URI will point to a location where the
user only has a read access.
metadata (google.protobuf.struct_pb2.Value):
An additional information about the Index; the schema of the
metadata can be found in
[metadata_schema][google.cloud.aiplatform.v1beta1.Index.metadata_schema_uri].
deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndexRef]):
Output only. The pointers to DeployedIndexes
created from this Index. An Index can be only
deleted if all its DeployedIndexes had been
undeployed first.
etag (str):
Used to perform consistent read-modify-write
updates. If not set, a blind "overwrite" update
happens.
labels (Sequence[google.cloud.aiplatform_v1beta1.types.Index.LabelsEntry]):
The labels with user-defined metadata to
organize your Indexes.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Index was
created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Index was most recently
updated. This also includes any update to the contents of
the Index. Note that Operations working on this Index may
have their
[Operations.metadata.generic_metadata.update_time]
[google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time]
a little after the value of this timestamp, yet that does
not mean their results are not already reflected in the
Index. Result of any successfully completed Operation on the
Index is reflected in it.
"""
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
description = proto.Field(proto.STRING, number=3,)
metadata_schema_uri = proto.Field(proto.STRING, number=4,)
metadata = proto.Field(proto.MESSAGE, number=6, message=struct_pb2.Value,)
deployed_indexes = proto.RepeatedField(
proto.MESSAGE, number=7, message=deployed_index_ref.DeployedIndexRef,
)
etag = proto.Field(proto.STRING, number=8,)
labels = proto.MapField(proto.STRING, proto.STRING, number=9,)
create_time = proto.Field(
proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp,
)
update_time = proto.Field(
proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"[email protected]"
]
| |
a0b4d2b6558019c0e406f1ef097a97fcefb6b50f | e5f49057eac43349a7fa999d90cb951e49617440 | /filter/docclass.py | e6a113f9f13d3049b9b891fe7adaa77184535832 | []
| no_license | Hsingmin/CI_py2.7 | 2ae9464c687a1ecfadc7928c6e4915d828ffc10e | ef2906755d498a054beec20a99c4784351816cce | refs/heads/master | 2021-08-30T06:23:09.630058 | 2017-12-16T13:01:19 | 2017-12-16T13:01:19 | 110,184,772 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,701 | py |
# docclass.py
import re
import math
def sampletrain(c1):
c1.train('Nobody owns the water.', 'good')
c1.train('the quick rabbit jumps fences', 'good')
c1.train('buy pharmaceuticals now', 'bad')
c1.train('make quick money at the online casino', 'bad')
c1.train('the quick brown fox jumps', 'good')
def getwords(doc):
splitter = re.compile('\\W*')
words = [s.lower() for s in splitter.split(doc) if len(s)>2 and len(s)<20]
return dict([w,1] for w in words)
class classifier:
def __init__(self, getfeatures, filename = None):
self.fc = {}
self.cc = {}
self.getfeatures = getfeatures
self.thresholds = {}
def incf(self, f, cat):
self.fc.setdefault(f, {})
self.fc[f].setdefault(cat, 0)
self.fc[f][cat] += 1
def incc(self, cat):
self.cc.setdefault(cat, 0)
self.cc[cat] += 1
def fcount(self, f, cat):
if f in self.fc and cat in self.fc[f]:
return float(self.fc[f][cat])
return 0.0
def catcount(self, cat):
if cat in self.cc:
return float(self.cc[cat])
return 0
def totalcount(self):
return sum(self.cc.values())
def categories(self):
return self.cc.keys()
def train(self, item, cat):
features = self.getfeatures(item)
for f in features:
self.incf(f, cat)
self.incc(cat)
def fprob(self, f, cat):
if self.catcount(cat) == 0:
return 0
return self.fcount(f, cat)/self.catcount(cat)
def weightedprob(self, f, cat, prf, weight=1.0, ap=0.5):
basicprob = prf(f, cat)
totals = sum([self.fcount(f,c) for c in self.categories()])
bp = ((weight*ap) + (totals*basicprob))/(weight+totals)
return bp
def setthresholds(self, cat, t):
self.thresholds[cat] = t
def getthresholds(self, cat):
if cat not in self.thresholds:
return 1.0
return self.thresholds[cat]
class naivebayes(classifier):
def docprob(self, item, cat):
features = self.getfeatures(item)
p = 1
for f in features:
p *= self.weightedprob(f, cat, self.fprob)
return p
def prob(self, item, cat):
catprob = self.catcount(cat)/self.totalcount()
docprob = self.docprob(item, cat)
return catprob*docprob
def classify(self, item, default = None):
probs = {}
max = 0.0
for cat in self.categories():
probs[cat] = self.prob(item, cat)
if probs[cat] > max:
max = probs[cat]
best = cat
for cat in probs:
if cat == best:
continue
if probs[cat]*self.getthresholds(best)>probs[best]:
return default
return best
class fisherclassifier(classifier):
def __init__(self, getfeatures):
classifier.__init__(self, getfeatures)
self.minimums = {}
def setminimum(self, cat, min):
self.minimums[cat] = min
def getminimum(self, cat):
if cat not in self.minimums:
return 0
return self.minimums[cat]
def classify(self, item, default = None):
best = default
max = 0.0
for c in self.categories():
p = self.fisherprob(item, c)
if p > self.getminimum(c) and p > max:
best = c
max = p
return best
def cprob(self, f, cat):
clf = self.fprob(f, cat)
if clf == 0:
return 0
freqsum = sum([self.fprob(f, c) for c in self.categories()])
p = clf/freqsum
return p
def fisherprob(self, item, cat):
p = 1
features = self.getfeatures(item)
for f in features:
p *= (self.weightedprob(f, cat, self.cprob))
fscores = -2*math.log(p)
return self.invchi2(fscores, len(features)*2)
def invchi2(self, chi, df):
m = chi / 2.0
sum = term = math.exp(-m)
for i in range(1, df//2):
term *= m/i
sum += term
return min(sum, 1.0)
| [
"[email protected]"
]
| |
86aa8e4a31017d6d63b19ac4cd3b040d922f3902 | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/homework02/maoyanspiders/maoyanspiders/spiders/movies_20200628181659.py | ef1424ea237252dfb40fa01bde4bf24ab2c06ba7 | []
| no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 2,603 | py | # -*- coding: utf-8 -*-
import scrapy
from maoyanspiders.items import MaoyanspidersItem
import lxml.etree
from bs4 import BeautifulSoup as bs
class MoviesSpider(scrapy.Spider):
name = 'movies'
allowed_domains = ['maoyan.com']
start_urls = ['http://maoyan.com/board/4']
header = {
'Content-Type': 'text/plain; charset=UTF-8',
'Cookie' : '__mta=251934006.1593072991075.1593305918113.1593310282256.42; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593140975947.1593145813576.21; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593310282; _lxsdk_s=172f8db8281-bbf-e4f-981%7C%7C1',
# 'Host' : 'http://www.baidu.com',
'Origin': 'https://maoyan.com',
'Referer': 'https://maoyan.com/board/4',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
}
# def parse(self, response):
# pass
def start_requests(self):
url = f'https://maoyan.com/board/4'
yield scrapy.Request(url=url,headers=self.header,callback=self.parse)
def parse(self, response):
selec
soup = bs(response.text,'html.parser')
for i in soup.find_all('div',attrs={'class' : 'movie-item-info'}):
item = MaoyanspidersItem()
title = i.find('p',attrs={'class':'name'}).find('a')
name = title.get('title')
link = 'https://maoyan.com'+ title.get('href')
time = i.find('p',attrs={'class' : 'releasetime'}).text
item['films_name'] = name
item['release_time'] = time
print(link)
yield scrapy.Request(url=link, headers = self.header, meta={'item':item},callback=self.parse1)
def parse1(self, response):
item = response.meta['item']
# soup = bs(response.text,'html.parser')
soup = bs('./week01/homework02/1375.html')
type = soup.find('div',attrs={'class' :'banner'}).find_all('li')[0].text.replace('\n',' ')
print(soup)
# print(type)
item['films_type'] = type
print(item)
yield item
| [
"[email protected]"
]
| |
42c55265daabb2470cae40ea23ca66ff4211931f | 94567834d7ca69fba178a9d2d0ae89a73f813741 | /analyses/monthly_time_series/China_prmsl/plot_ts.py | 630af662e02fee6a55708d2f481b49b4f71496c9 | []
| no_license | philip-brohan/Yangtze_floods | 41b6d655fd4f06f8129c4e5c10d51d5e74d6cec4 | 8ad376328f5b7866d82dd3613e6157cfa31abea1 | refs/heads/master | 2021-09-07T23:56:51.250070 | 2021-08-23T16:43:58 | 2021-08-23T16:43:58 | 243,798,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,964 | py | #!/usr/bin/env python
# 20CRv3 time-series: Monthly average, regional average.
# Each ensemble member as a seperate line.
# Uses pre-calculated time-series.
import os
import iris
import numpy
import datetime
import pickle
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
start=datetime.datetime(1926,1,1,0,0)
end=datetime.datetime(1935,12,31,23,59)
ylim = (-300,250)
dts=[]
ndata=None
for year in range(start.year,end.year+1,1):
sfile="%s/20CR/version_3/analyses/Yangtze_ts/PRMSL_v3/%04d.pkl" % \
(os.getenv('SCRATCH'),year)
with open(sfile, "rb") as f:
(ndyr,dtyr) = pickle.load(f)
dts.extend([dtyr[0:11]])
if ndata is None:
ndata = ndyr[0:11,:]
else:
ndata = numpy.ma.concatenate((ndata,ndyr[0:11,:]))
# Plot the resulting array as a set of line graphs
fig=Figure(figsize=(19.2,6), # Width, Height (inches)
dpi=300,
facecolor=(0.5,0.5,0.5,1),
edgecolor=None,
linewidth=0.0,
frameon=False,
subplotpars=None,
tight_layout=None)
canvas=FigureCanvas(fig)
font = {'family' : 'sans-serif',
'sans-serif' : 'Arial',
'weight' : 'normal',
'size' : 16}
matplotlib.rc('font', **font)
# Plot the lines
ax = fig.add_axes([0.05,0.05,0.93,0.93],
xlim=((start-datetime.timedelta(days=1)),
(end+datetime.timedelta(days=1))),
ylim=ylim)
ax.set_ylabel('PRMSL anomaly')
for m in range(80):
ax.add_line(Line2D(dts,
ndata[:,m],
linewidth=0.5,
color=(0,0,1,1),
alpha=0.1,
zorder=200))
fig.savefig('PRMSL_ts.png')
| [
"[email protected]"
]
| |
a7a89e0b98c823da3182800cda0c3e9b0acfaecc | 09a1d8a920ddb9193dfcc9b05ddd842b83b18e0d | /aerosandbox_legacy_v0/examples_legacy_v0/vlm4_conventional.py | 6d4ece711f7bb8d718aaa5d3f6e9995720f1a915 | [
"MIT"
]
| permissive | aqreed/AeroSandbox | 8564b6adb1f297e94aec96872b55f59171ae8ac1 | a0c5f3b2760fcddee28cff2715eeddcb8bcbe655 | refs/heads/master | 2021-03-24T21:02:14.881986 | 2020-03-15T22:43:55 | 2020-03-15T22:43:55 | 247,564,677 | 1 | 0 | MIT | 2020-03-15T22:46:25 | 2020-03-15T22:46:24 | null | UTF-8 | Python | false | false | 3,281 | py | from aerosandbox import *
glider = Airplane(
name="Conventional",
xyz_ref=[0, 0, 0], # CG location
wings=[
Wing(
name="Main Wing",
xyz_le=[0, 0, 0], # Coordinates of the wing's leading edge
symmetric=True,
xsecs=[ # The wing's cross ("X") sections
WingXSec( # Root
xyz_le=[0, 0, 0], # Coordinates of the XSec's leading edge, relative to the wing's leading edge.
chord=0.18,
twist=2, # degrees
airfoil=Airfoil(name="naca4412"),
control_surface_type='symmetric', # Flap # Control surfaces are applied between a given XSec and the next one.
control_surface_deflection=0, # degrees
control_surface_hinge_point=0.75 # as chord fraction
),
WingXSec( # Mid
xyz_le=[0.01, 0.5, 0],
chord=0.16,
twist=0,
airfoil=Airfoil(name="naca4412"),
control_surface_type='asymmetric', # Aileron
control_surface_deflection=30,
control_surface_hinge_point=0.75
),
WingXSec( # Tip
xyz_le=[0.08, 1, 0.1],
chord=0.08,
twist=-2,
airfoil=Airfoil(name="naca4412"),
)
]
),
Wing(
name="Horizontal Stabilizer",
xyz_le=[0.6, 0, 0.1],
symmetric=True,
xsecs=[
WingXSec( # root
xyz_le=[0, 0, 0],
chord=0.1,
twist=-10,
airfoil=Airfoil(name="naca0012"),
control_surface_type='symmetric', # Elevator
control_surface_deflection=0,
control_surface_hinge_point=0.75
),
WingXSec( # tip
xyz_le=[0.02, 0.17, 0],
chord=0.08,
twist=-10,
airfoil=Airfoil(name="naca0012")
)
]
),
Wing(
name="Vertical Stabilizer",
xyz_le=[0.6, 0, 0.15],
symmetric=False,
xsecs=[
WingXSec(
xyz_le=[0, 0, 0],
chord=0.1,
twist=0,
airfoil=Airfoil(name="naca0012"),
control_surface_type='symmetric', # Rudder
control_surface_deflection=0,
control_surface_hinge_point=0.75
),
WingXSec(
xyz_le=[0.04, 0, 0.15],
chord=0.06,
twist=0,
airfoil=Airfoil(name="naca0012")
)
]
)
]
)
# glider.set_paneling_everywhere(20, 20)
ap = vlm4(
airplane=glider,
op_point=OperatingPoint(
velocity=10,
alpha=5,
beta=0,
p=0,
q=0,
r=0,
),
)
ap.run()
ap.draw()
# Answer you should get: (XFLR5)
# CL = 0.797
# CDi = 0.017
# CL/CDi = 47.211
| [
"[email protected]"
]
| |
920bde8494004fccb4a049249d10f17b7726fe68 | f0181afd2eea9b086ce9487fb8d7fd949282140a | /bin/countgenbank.py | 173a4ff2ea62bc564b9bd89f321a8135b513e0b3 | [
"MIT"
]
| permissive | linsalrob/EdwardsLab | 4a571676859c8b7238e733a0d3ad98ceb2e83c63 | 3c466acc07f1a56b575860ad26c92f900b272a53 | refs/heads/master | 2023-08-20T17:13:35.466103 | 2023-08-17T09:17:36 | 2023-08-17T09:17:36 | 25,702,093 | 36 | 25 | MIT | 2020-09-23T12:44:44 | 2014-10-24T18:27:16 | Python | UTF-8 | Python | false | false | 1,679 | py | """
Count features in a genbank file or directory of files
"""
import os
import sys
import argparse
from roblib import message, genbank_seqio
__author__ = 'Rob Edwards'
__copyright__ = 'Copyright 2020, Rob Edwards'
__credits__ = ['Rob Edwards']
__license__ = 'MIT'
__maintainer__ = 'Rob Edwards'
__email__ = '[email protected]'
def count_feats(gbkf, verbose=False):
if verbose:
message(f"Reading {gbkf}", "BLUE")
count = {}
for seq in genbank_seqio(gbkf):
for feat in seq.features:
count[feat.type] = count.get(feat.type, 0) + 1
return count
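# count_feats returns a dict mapping feature type to count,
# e.g. (hypothetical) {"CDS": 4321, "tRNA": 86, "rRNA": 22}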
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=" ")
parser.add_argument('-f', help='genbank file')
parser.add_argument('-d', help='directory of genbank files')
parser.add_argument('-t', help='feature type(s) (at least one must be provided)', nargs="+")
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
files = []
if args.f:
files.append(args.f)
if args.d:
for f in os.listdir(args.d):
files.append(os.path.join(args.d, f))
    if len(files) == 0:
        message("Fatal. Either -d or -f is required", "RED")
        sys.exit(1)
    if not args.t:
        message("Fatal. Please provide at least one feature type to count", "RED")
        sys.exit(1)
print("File", end="")
for t in args.t:
print(f"\t{t}", end="")
print()
for f in files:
c = count_feats(f, args.v)
print(f, end="")
for t in args.t:
if t in c:
print(f"\t{c[t]}", end="")
else:
                print("\t0", end="")
print()
| [
"[email protected]"
]
| |
4079d5185261835ffa9df17e29142566cf46c3bd | dece3eb22be792aeac65ea12a1f183dd73498add | /coding/Mysql/1.py | 10119b94c419e57e3114923e1eb5292e80410ffd | []
| no_license | santanu5670/Python | 352515ad82f94157e7f74467c5e7dedd6c9069c8 | 48c2779ccf934588f2bfce7cd322088acec04080 | refs/heads/master | 2023-06-24T09:22:49.925654 | 2021-07-22T15:17:34 | 2021-07-22T15:17:34 | 387,683,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | import mysql.connector
mydb=mysql.connector.connect(host='localhost',user='nsec',password='nsec',database='mysql')
print(mydb)
if(mydb):
print('Connection successful')
else:
print('Connection Unsuccessful') | [
"[email protected]"
]
| |
0ae5aa472863f78daed685a05bb3eafc6c6f559c | fb6e7922df3da2e9cdc37a00150d6d7663e907ff | /environment/rtfm/dynamics/item/weapon/tridents.py | 7f513dd0c963025c039f762d29b6e88477da154d | [
"Apache-2.0"
]
| permissive | Spiph/GTG | c54a587002c42a032c89e8eceb5ec638f6c8c05f | 4a45032290d0c1364e4398684582c51094b245f5 | refs/heads/main | 2023-09-02T14:44:14.946624 | 2021-10-27T12:29:05 | 2021-10-27T12:29:05 | 393,086,007 | 0 | 0 | Apache-2.0 | 2021-08-05T15:09:07 | 2021-08-05T15:09:07 | null | UTF-8 | Python | false | false | 465 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_weapon import Weapon
from ... import dice as D, material as M
class BaseTrident(Weapon):
pass
class Trident(BaseTrident):
def __init__(self):
super().__init__('trident', weight=25, damage=D.Dice.from_str('3d4'), material=M.Iron, hit=0)
| [
"[email protected]"
]
| |
259e794cad0040bcd4708de22d5d229d14681030 | c085b06c9eb220eb40b5ada840886c09a152f053 | /Libros de Python/web/web/ejer/tema-01/compose1.py.txt | d8040cca2291387aa8843870ff3af3f23cb0674a | []
| no_license | thanggc/libros | 7d3bf564c5a227f08390fbcc6721a0aed160e3e0 | 430c03fe97544d263b5c3a665327b527d9c223a8 | refs/heads/master | 2020-12-25T13:23:38.527089 | 2013-04-29T23:14:08 | 2013-04-29T23:14:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | txt |
def compose (*funcs):
def composed (x):
for f in reversed (funcs):
x = f (x)
return x
return composed
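# Example (assumed usage): the last function passed is applied first, so
# compose(f, g)(x) == f(g(x)); e.g. compose(str.strip, str.lower)(' AbC ') -> 'abc'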
| [
"[email protected]"
]
| |
6ee20a4a8435db0bfc40638cceef71df51f88e65 | 4e4c5827ed94024d499982279ce611b893c03572 | /Azure Firewall/Script - Migrate Checkpoint config to Azure Firewall Policy/chkp2azfw.py | 3d99c0cb6d834b73264997c9c8125e14c234c1a6 | [
"LicenseRef-scancode-generic-cla",
"MIT"
]
| permissive | Azure/Azure-Network-Security | 19a51076e5eda76e9808845792421b82ea5afb84 | 32141bb734518d5ae51bed5f7ca824a01b04ab49 | refs/heads/master | 2023-08-30T20:53:07.435480 | 2023-08-28T15:55:12 | 2023-08-28T15:55:12 | 215,905,001 | 690 | 264 | MIT | 2023-09-11T06:38:17 | 2019-10-17T23:46:28 | Python | UTF-8 | Python | false | false | 43,780 | py | import argparse
import json
import re
import os
import sys
import copy
# https://docs.python.org/3/library/ipaddress.html
import ipaddress
# Helper functions
# Arguments
parser = argparse.ArgumentParser(description='Generate an ARM template to create a Rule Collection Group from a Checkpoint ruleset exported with the Show Package Tool (https://support.checkpoint.com/results/sk/sk120342).')
parser.add_argument('--json-index-file', dest='json_index_file', action='store',
default="./index.json",
help='Local file containing in JSON the links to the rest of the exported JSON files. The default is "./index.json"')
parser.add_argument('--policy-name', dest='policy_name', action='store',
default="azfwpolicy",
help='Name for the Azure Firewall Policy. The default is "azfwpolicy"')
parser.add_argument('--policy-sku', dest='policy_sku', action='store',
default="Standard",
help='SKU for the Azure Firewall Policy. Possible values: Standard, Premium (default: Standard)')
parser.add_argument('--do-not-create-policy', dest='dont_create_policy', action='store_true',
default=False,
help='If specified, do not include ARM code for the policy, only for the rule collection group. Use if the policy already exists.')
parser.add_argument('--rcg-name', dest='rcg_name', action='store',
default="importedFromCheckpoint",
help='Name for the Rule Collection Group to create in the Azure Firewall Policy. The default is "importedFromCheckpoint"')
parser.add_argument('--rcg-priority', dest='rcg_prio', action='store',
default="10000",
help='Priority for the Rule Collection Group to create in the Azure Firewall Policy. The default is "10000"')
parser.add_argument('--no-ip-groups', dest='use_ipgroups', action='store_false',
default=True,
help='Whether some address groups should be converted to Azure IP Groups (default: True)')
parser.add_argument('--no-app-rules', dest='use_apprules', action='store_false',
default=True,
                    help='Whether to attempt converting network rules that use HTTP/S into application rules. Note that this might be a problem if an explicit network deny exists (default: True)')
parser.add_argument('--max-ip-groups', dest='max_ipgroups', action='store', type=int, default=50,
help='Optional, maximum number of IP groups that will be created in Azure')
parser.add_argument('--rule-uid-to-name', dest='rule_id_to_name', action='store_true',
default=False,
help='Includes the UID of the Checkpoint rule in the name of the Azure rule, useful for troubleshooting (default: False)')
parser.add_argument('--remove-explicit-deny', dest='remove_explicit_deny', action='store_true',
default=False,
help='If a deny any/any is found, it will not be converted to the Azure Firewall syntax. Useful if using application rules (default: False)')
parser.add_argument('--output', dest='output', action='store',
default="none",
help='Output format. Possible values: json, none')
parser.add_argument('--pretty', dest='pretty', action='store_true',
default=False,
help='Print JSON in pretty mode (default: False)')
parser.add_argument('--log-level', dest='log_level_string', action='store',
default='warning',
help='Logging level (valid values: error/warning/info/debug/all/none. Default: warning)')
args = parser.parse_args()
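# Example invocation (illustrative only; file and resource names are assumptions):
#   python chkp2azfw.py --json-index-file ./index.json --policy-name myAzfwPolicy \
#       --policy-sku Premium --rcg-name importedFromCheckpoint --output json --pretty > rcg_template.json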
# Variables
az_app_rcs = []
az_net_rcs = []
ipgroups = []
discarded_rules = []
rcg_name = args.rcg_name
rcg_prio = args.rcg_prio
rc_net_name = 'from-chkp-net'
rc_net_prio_start = "10000"
rc_app_name = 'from-chkp-app'
rc_app_prio_start = "11000"
cnt_apprules = 0
cnt_allow = 0
cnt_deny = 0
cnt_disabledrules = 0
cnt_apprules = 0
cnt_netrules_ip = 0
cnt_netrules_fqdn = 0
cnt_chkp_rules = 0
# Returns true if the string contains at least one digit (used to detect a numeric log level)
def is_number(value):
for character in value:
if character.isdigit():
return True
return False
# Returns a string formatted to be used as a name in Azure
def format_to_arm_name(name):
name = name.replace(".", "-")
name = name.replace("/", "-")
name = name.replace(" ", "_")
return name
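# Illustrative example of the transformation above:
#   format_to_arm_name("web servers/10.0.0.0") -> "web_servers-10-0-0-0"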
# Returns true if the string is a UID
def is_uid(value):
    if len(value) == 36 and value[8] == '-' and value[13] == '-' and value[18] == '-' and value[23] == '-':
        return True
    return False
# Finds an object in a list by its UID
def find_uid(object_list, uid):
for object in object_list:
if object['uid'] == uid:
return object
return None
# Returns true if there is an IP group with the same chkp id
def is_ipgroup(ipgroup_list, uid):
for ipgroup in ipgroup_list:
if ipgroup['id'] == uid:
return True
return False
# Returns IP Group corresponding to the chkp id
def find_ipgroup(ipgroup_list, uid):
for ipgroup in ipgroup_list:
if ipgroup['id'] == uid:
return ipgroup
return None
# True if parameter is a valid FQDN according to RFCs 952, 1123
def is_fqdn(str_var):
return bool(re.match(r"(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,4}$)",str(str_var)))
# True if parameter is a valid IP address (with or without mask)
# The regex is quite simple (for example it would match 999.999.999.999/99), but we assume that the IP addresses in the original policy are valid
def is_ipv4(str_var):
return bool(re.match(r"^([0-9]{1,3}\.){3}[0-9]{1,3}($|/[0-9]{1,2}$)",str(str_var)))
# Perform some checks on the rule to add, and append it to the list of rules provided in the 2nd argument
# Some rules need to be broken down in multiple ones, so the function adds a suffix to the created rules in this case
def append_rule(rule_to_be_appended, rules_to_append_to):
if log_level >= 8:
print("DEBUG: appending to rules:", str(rule_to_be_appended), file=sys.stderr)
src_fields = ('sourceAddresses', 'sourceIpGroups', 'sourceServiceTags')
dst_fields = ('destinationAddresses', 'destinationIpGroups', 'destinationFqdns', 'destinationServiceTags')
all_fields = src_fields + dst_fields
# Count how many rules we will be splitting (to avoid unnecessary suffixes if there is only one rule)
total_rule_no = 0
for src_field in src_fields:
for dst_field in dst_fields:
if len(rule_to_be_appended[src_field]) > 0 and len(rule_to_be_appended[dst_field]) > 0:
total_rule_no += 1
# Process the rule
split_rule_counter = 0
for src_field in src_fields:
for dst_field in dst_fields:
# Only look at combinations where the src_field and dst_field are non-zero
if len(rule_to_be_appended[src_field]) > 0 and len(rule_to_be_appended[dst_field]) > 0:
# Should we split a rule that contains both IP addresses and service tags in either sourceAddresses or destinationAddresses?
temp_rule = copy.copy(rule_to_be_appended)
split_rule_counter += 1
if total_rule_no > 1:
temp_rule['name'] = temp_rule['name'] + '-' + str(split_rule_counter)
else:
temp_rule['name'] = temp_rule['name']
# Blank all the rest fields
for blank_field in all_fields:
if blank_field != src_field and blank_field != dst_field:
temp_rule [blank_field] = []
rules_to_append_to.append(temp_rule)
# The fields 'sourceServiceTags' and 'destinationServiceTags' are not supported in Azure Firewall, so we need to change them to 'sourceAddresses' and 'destinationAddresses'
if src_field == 'sourceServiceTags':
temp_rule['sourceAddresses'] = temp_rule['sourceServiceTags']
temp_rule.pop('sourceServiceTags')
if dst_field == 'destinationServiceTags':
temp_rule['destinationAddresses'] = temp_rule['destinationServiceTags']
temp_rule.pop('destinationServiceTags')
if split_rule_counter > 1:
if log_level >= 7:
print("DEBUG: Checkpoint rule {0} has been split in {1} Azure Firewall rules".format(rule_to_be_appended['name'], split_rule_counter), file=sys.stderr)
return rules_to_append_to
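# Illustrative example of the splitting logic above (rule name and values are made up):
# a rule 'myrule' with sourceAddresses=['10.0.0.0/24'], sourceIpGroups=['branch-nets'] and
# destinationAddresses=['*'] has two non-empty (source, destination) combinations, so it is
# emitted as two Azure rules, 'myrule-1' (sourceAddresses) and 'myrule-2' (sourceIpGroups).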
# Recursively finds all members of objects by their UID
def find_members(object_group_list, uid_list, member_list=[], debug=False, mode='ip'):
# if debug:
# print("DEBUG: looking for UIDs '{0}'...".format(str(uid_list)), file=sys.stderr)
# Make sure that the uid is a list
if not isinstance(uid_list, list):
uid_list = [uid_list]
# Loop through all objects
for object_group in object_group_list:
if object_group['uid'] in uid_list:
# if debug:
# print('DEBUG: found matching object', str(object_group), file=sys.stderr)
if 'members' in object_group:
if len(object_group['members']) > 0:
for member in object_group['members']:
if is_uid(member):
member_list = find_members(object_group_list, member, member_list=member_list)
else:
if debug:
print('DEBUG: object group {0} has no members.'.format(str(object_group['name'])), file=sys.stderr)
elif object_group['type'] == 'network':
member_list.append(object_group['subnet4'] + '/' + str(object_group['mask-length4']))
elif object_group['type'] == 'host':
member_list.append(object_group['ipv4-address'] + '/32')
elif object_group['type'] == 'dns-domain':
member_list.append(str(object_group['name'])[1:]) # In checkpoint syntax, fqdn starts with a dot
elif object_group['type'] == 'dynamic-object': # Service Tag "AVDServiceRanges"
if debug:
print('DEBUG: adding dynamic-object {0}'.format(object_group['name']), str(object_group), file=sys.stderr)
if object_group['name'] == 'AVDServiceRanges':
member_list.append('WindowsVirtualDesktop')
else:
if log_level >= 3:
print('ERROR: dynamic-object {0} cannot be mapped to an Azure service tag'.format(object_group['name']), file=sys.stderr)
elif object_group['type'] == 'service-tcp':
member_list.append(('tcp', object_group['port']))
elif object_group['type'] == 'service-udp':
member_list.append(('udp', object_group['port']))
elif object_group['type'] == 'service-icmp':
member_list.append(('icmp', '*'))
elif object_group['type'] == 'CpmiAnyObject':
if (mode == 'ip'):
member_list.append('*')
else:
member_list.append(('any', '*'))
elif object_group['type'] == 'RulebaseAction':
member_list.append(object_group['name'])
elif object_group['type'] in ('CpmiGatewayCluster', 'CpmiClusterMember', 'CpmiHostCkp', 'simple-cluster', 'Global'):
if debug:
print('DEBUG: ignoring object type', object_group['type'], file=sys.stderr)
else:
if debug:
print('DEBUG: unknown object type', object_group['type'], file=sys.stderr)
return list(set(member_list))
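# Illustrative example (object names and UIDs are made up): for a Checkpoint group whose members
# are a 'network' object 10.1.0.0/16 and a 'host' object 10.2.0.5,
#   find_members(policy_objects, group_uid, member_list=[], mode='ip')
# returns ['10.1.0.0/16', '10.2.0.5/32'] (order not guaranteed, since the result is de-duplicated with set()).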
# Set log_level
if is_number(args.log_level_string):
try:
log_level = int(args.log_level_string)
except:
log_level = 4
else:
if args.log_level_string == 'error':
log_level = 3
elif args.log_level_string == 'warning':
log_level = 4
elif args.log_level_string == 'notice':
log_level = 5
elif args.log_level_string == 'info':
log_level = 6
elif args.log_level_string == 'debug' or args.log_level_string == 'all':
log_level = 7
    elif args.log_level_string == 'debugplus':
log_level = 8
elif args.log_level_string == 'none':
log_level = 0
else:
log_level = 4 # We default to 'warning'
# Get JSON index file list from the specified folder
if log_level > 7:
print ("DEBUG: Loading file {0}...".format(args.json_index_file), file=sys.stderr)
try:
with open(args.json_index_file) as f:
json_index = json.load(f)
except Exception as e:
if log_level >= 3:
print("ERROR: Error when opening JSON index file", args.json_index_file, "-", str(e), file=sys.stderr)
sys.exit(0)
# Go through the files and create the objects
access_layers = []
threat_layers = []
nat_layers = []
for package in json_index['policyPackages']:
if 'objects' in package:
if log_level >= 7:
print ("DEBUG: Objects section found, file {0}...".format(package['objects']['htmlObjectsFileName']), file=sys.stderr)
filename = package['objects']['htmlObjectsFileName']
try:
# Try to open the file with JSON extension
filename = os.path.splitext(package['objects']['htmlObjectsFileName'])[0]+'.json'
with open(filename) as f:
policy_objects = json.load(f)
if log_level >= 7:
print ("DEBUG: File {0} loaded successfully".format(filename), file=sys.stderr)
except Exception as e:
if log_level >= 4:
print("WARNING: Error when opening JSON file", filename, "-", str(e), file=sys.stderr)
pass
if 'accessLayers' in package:
for layer in package['accessLayers']:
if 'htmlFileName' in layer:
if log_level >= 7:
print ("DEBUG: Access layer found, file {0}...".format(layer['htmlFileName']), file=sys.stderr)
filename = layer['htmlFileName']
try:
# Try to open the file with JSON extension
filename = os.path.splitext(layer['htmlFileName'])[0]+'.json'
with open(filename) as f:
access_layers.append(json.load(f))
if log_level >= 7:
print ("DEBUG: File {0} loaded successfully".format(filename), file=sys.stderr)
except Exception as e:
if log_level >= 4:
print("WARNING: Error when opening JSON file for access layer", filename, "-", str(e), file=sys.stderr)
pass
if 'threatLayers' in package:
for layer in package['threatLayers']:
if 'htmlFileName' in layer:
if log_level >= 7:
print ("DEBUG: Threat layer found, file {0}...".format(layer['htmlFileName']), file=sys.stderr)
filename = layer['htmlFileName']
try:
filename = os.path.splitext(layer['htmlFileName'])[0] + '.json'
with open(filename) as f:
threat_layers.append(json.load(f))
if log_level >= 7:
print ("DEBUG: File {0} loaded successfully".format(filename), file=sys.stderr)
except Exception as e:
if log_level >= 4:
print("WARNING: Error when opening JSON file for threat layer", filename, "-", str(e), file=sys.stderr)
pass
if 'natLayer' in package:
layer = package['natLayer']
if 'htmlFileName' in layer:
if log_level >= 7:
print ("DEBUG: NAT layer found, file {0}...".format(layer['htmlFileName']), file=sys.stderr)
filename = layer['htmlFileName']
try:
# Try to open the file with JSON extension
filename = os.path.splitext(layer['htmlFileName'])[0]+'.json'
with open(filename) as f:
# nat_layer = json.load(f)
nat_layers.append(json.load(f))
if log_level >= 7:
print ("DEBUG: File {0} loaded successfully".format(filename), file=sys.stderr)
except Exception as e:
if log_level >= 4:
print("WARNING: Error when opening JSON file for NAT layer", filename, "-", str(e), file=sys.stderr)
pass
# Inspect the imported objects
# policy_object_types = []
# for policy_object in policy_objects:
# if 'type' in policy_object:
# if not policy_object['type'] in policy_object_types:
# policy_object_types.append(policy_object['type'])
# if log_level >= 7:
# print('Policy object types found:', str(policy_object_types))
# Policy object types found: ['vpn-community-star', 'RulebaseAction', 'CpmiAnyObject', 'service-group', 'group', 'Track', 'Global', 'service-tcp', 'network', 'dynamic-object', 'host', 'CpmiHostCkp', 'service-icmp', 'service-other', 'threat-profile', 'ThreatExceptionRulebase', 'service-udp', 'dns-domain', 'simple-cluster', 'CpmiClusterMember']
# Inspect the imported access layers
def inspect_access_layers(layer_list):
for layer in layer_list:
for rule in layer:
# Check rule is a dictionary and contains a type key
if isinstance(rule, dict) and 'type' in rule:
if rule['type'] == 'access-rule':
# Rule Name
rule_name = rule['name'] if len(rule['name']) <= 38 else rule['name'][:38]
# action/src/dst/svc object Members
rule_action_members_str = str(find_members(policy_objects, rule['action'], member_list=[])[0])
rule_src_members = find_members(policy_objects, rule['source'], member_list=[], mode='ip')
rule_src_members_str = str(rule_src_members) if len(str(rule_src_members)) <= 38 else str(rule_src_members)[:38]
rule_dst_members = find_members(policy_objects, rule['destination'], member_list=[], mode='ip')
rule_dst_members_str = str(rule_dst_members) if len(str(rule_dst_members)) <= 38 else str(rule_dst_members)[:38]
rule_svc_members = find_members(policy_objects, rule['service'], member_list=[], mode='svc')
rule_svc_members_str = str(rule_svc_members) if len(str(rule_svc_members)) <= 38 else str(rule_svc_members)[:38]
# For each group ID used as source or destination, create an IP group object
if len(rule_src_members) > 0:
for src in rule['source']:
if not is_ipgroup(ipgroups, src):
ipgroups.append({'id': src, 'members': rule_src_members, 'member_count': len(rule_src_members), 'name': find_uid(policy_objects, src)['name']})
if len(rule_dst_members) > 0:
for dst in rule['destination']:
if not is_ipgroup(ipgroups, dst):
ipgroups.append({'id': dst, 'members': rule_dst_members, 'member_count': len(rule_dst_members), 'name': find_uid(policy_objects, dst)['name']})
elif rule['type'] == 'nat-rule':
if log_level >= 7:
print('DEBUG: processing NAT rule', rule['rule-number'], file=sys.stderr)
elif rule['type'] == 'threat-rule':
if log_level >= 7:
print('DEBUG: processing Threat rule', rule['rule-number'], file=sys.stderr)
else:
if log_level >= 7:
print('DEBUG: ignoring rule of type', rule['type'], file=sys.stderr)
else:
print('ERROR: Rule is not a dictionary or does not contain a type key:', str(rule), file=sys.stderr)
def print_access_layer_rule(layer_list, rule_id_list, debug=False):
for layer in layer_list:
if log_level >= 7:
print('{0:<40}{1:<40}{2:<40}{3:<40}{4:<40}'.format('Name', 'Action', 'Source', 'Destination', 'Service'), file=sys.stderr)
for rule in layer:
# Check rule is a dictionary and contains a type key
if isinstance(rule, dict) and 'type' in rule:
if rule['type'] == 'access-rule' and rule['uid'] in rule_id_list:
# Rule Name
rule_name = rule['name'] if len(rule['name']) <= 38 else rule['name'][:38]
# action/src/dst/svc object Members
rule_action_members_str = str(find_members(policy_objects, rule['action'], member_list=[])[0])
rule_src_members = find_members(policy_objects, rule['source'], member_list=[], mode='ip', debug=debug)
rule_src_members_str = str(rule_src_members) if len(str(rule_src_members)) <= 38 else str(rule_src_members)[:38]
rule_dst_members = find_members(policy_objects, rule['destination'], member_list=[], mode='ip', debug=debug)
rule_dst_members_str = str(rule_dst_members) if len(str(rule_dst_members)) <= 38 else str(rule_dst_members)[:38]
rule_svc_members = find_members(policy_objects, rule['service'], member_list=[], mode='svc', debug=debug)
rule_svc_members_str = str(rule_svc_members) if len(str(rule_svc_members)) <= 38 else str(rule_svc_members)[:38]
# Print
if log_level >= 7:
print('{0:<40}{1:<40}{2:<40}{3:<40}{4:<40}'.format(rule_name, rule_action_members_str, rule_src_members_str, rule_dst_members_str, rule_svc_members_str), file=sys.stderr)
# Process the imported access layers. inspect_access_layers needs to have run first to create the list of IP groups
def process_access_layers(layer_list, ipgroups):
global cnt_netrules_ip, cnt_netrules_fqdn, cnt_chkp_rules
last_action = None
for layer in layer_list:
for rule in layer:
# Check rule is a dictionary and contains a type key
if isinstance(rule, dict) and 'type' in rule:
if rule['type'] == 'access-rule':
cnt_chkp_rules += 1
# Rule Name and action
rule_name = rule['name']
rule_action = str(find_members(policy_objects, rule['action'], member_list=[])[0])
# If there is a change from deny to allow, or from allow to deny, or if this is the first rule, we need to create a rule collection
if rule_action != last_action:
rule_collection = {
'name': rc_net_name + '-' + rule_action + '-' + str(len(az_net_rcs)),
'action': rule_action,
'rules': []
}
# Append the rule collection to the list of rule collections and set last_action to the new value
az_net_rcs.append(rule_collection)
last_action = rule_action
# action/src/dst/svc object Members
rule_src_members = find_members(policy_objects, rule['source'], member_list=[], mode='ip')
rule_dst_members = find_members(policy_objects, rule['destination'], member_list=[], mode='ip')
rule_svc_members = find_members(policy_objects, rule['service'], member_list=[], mode='svc')
# Print
if len(rule_src_members) > 0 and len(rule_dst_members) > 0 and len(rule_svc_members) > 0:
# 'sourceServiceTags' and 'destinationServiceTags' are auxiliary fields, since the service tags go actually in the 'sourceAddresses' and 'destinationAddresses' fields
# The fields will be removed in the function append_rule
new_rule = {
'name': rule['name'] + '-' + str(rule['uid']),
'ruleType': 'NetworkRule',
'sourceAddresses': [],
'sourceIpGroups': [],
'destinationAddresses': [],
'destinationFqdns': [],
'destinationIpGroups': [],
'sourceServiceTags': [],
'destinationServiceTags': []
}
if not args.rule_id_to_name:
new_rule['name'] = rule['name']
if len(rule_src_members) == 1 and is_ipgroup(ipgroups, rule_src_members[0]):
                            new_rule['sourceIpGroups'].append(find_ipgroup(ipgroups, rule_src_members[0])['name'])
else:
for src in rule_src_members:
if src == 'any' or src == '*' or 'any' in src or src[0] == 'any':
new_rule['sourceAddresses'] = [ '*' ]
elif is_ipv4(src):
if src not in new_rule['sourceAddresses']:
new_rule['sourceAddresses'].append(src)
# If not an IP address, it must be a service tag
elif src not in new_rule['sourceAddresses']:
if src not in new_rule['sourceServiceTags']:
new_rule['sourceServiceTags'].append(src)
if len(rule_dst_members) == 1 and is_ipgroup(ipgroups, rule_dst_members[0]):
                            new_rule['destinationIpGroups'].append(find_ipgroup(ipgroups, rule_dst_members[0])['name'])
else:
for dst in rule_dst_members:
if dst == 'any' or dst == '*' or 'any' in dst:
cnt_netrules_ip += 1
new_rule['destinationAddresses'] = [ '*' ]
                                elif is_fqdn(dst):
                                    if dst not in new_rule['destinationFqdns']:
                                        cnt_netrules_fqdn += 1
                                        new_rule['destinationFqdns'].append(dst)
elif is_ipv4(dst):
if dst not in new_rule['destinationAddresses']:
cnt_netrules_ip += 1
new_rule['destinationAddresses'].append(dst)
# If not an IP address or a domain name, it must be a service tag
else:
if dst not in new_rule['destinationServiceTags']:
new_rule['destinationServiceTags'].append(dst)
# Services are in an array of 2-tuples (protocol, port)
if 'any' in rule_svc_members:
new_rule['ipProtocols'] = ['Any']
new_rule['destinationPorts'] = [ '*' ]
else:
new_rule['ipProtocols'] = []
new_rule['destinationPorts'] = []
for svc in rule_svc_members:
protocol = svc[0]
port = svc[1]
if protocol == 'tcp' or protocol == 'udp':
if protocol not in new_rule['ipProtocols']:
new_rule['ipProtocols'].append(protocol)
if port not in new_rule['destinationPorts']:
# Checkpoint accepts the syntax >1024, but Azure does not
if port[0] == '>':
new_rule['destinationPorts'].append(str(int(port[1:]) + 1) + '-65535')
else:
new_rule['destinationPorts'].append(port)
elif protocol == 'icmp':
if protocol not in new_rule['ipProtocols']:
new_rule['ipProtocols'].append(protocol)
new_rule['destinationPorts'] = [ '*' ]
elif protocol == 'any':
new_rule['ipProtocols'] = ['Any']
new_rule['destinationPorts'] = [ '*' ]
else:
print('ERROR: Unknown service protocol', protocol, 'in rule', rule_name, file=sys.stderr)
# Add new rule to the latest rule collection (the one we are working on)
if args.remove_explicit_deny and rule_action == 'Drop' and new_rule['sourceAddresses'] == [ '*' ] and new_rule['destinationAddresses'] == [ '*' ] and new_rule['destinationPorts'] == [ '*' ] and new_rule['ipProtocols'] == ['Any']:
discarded_rules.append(rule['uid'])
if log_level >= 6:
print('INFO: Skipping rule "{0}" as it is an explicit catch-all deny rule'.format(rule_name), file=sys.stderr)
else:
az_net_rcs[-1]['rules'] = append_rule(new_rule, az_net_rcs[-1]['rules'])
# If one of the objects was empty, add to the discarded rules
else:
discarded_rules.append(rule['uid'])
# Inspect the imported NAT layers
def inspect_nat_layers(layer_list):
for layer in layer_list:
print('{0:<5}{1:<20}{2:<20}{3:<20}{4:<20}{5:<20}{6:<20}'.format('ID', 'Original Src', 'Translated Src', 'Original Dst', 'Translated Dst', 'Original Svc', 'Translated Svc'), file=sys.stderr)
for rule in layer:
# Check rule is a dictionary and contains a type key
if isinstance(rule, dict) and 'type' in rule:
if rule['type'] == 'nat-rule':
if log_level >= 7:
# Rule ID
rule_id = rule['rule-number']
# src/dst/svc object Members
rule_osrc_members = find_members(policy_objects, rule['original-source'], member_list=[], mode='ip')
rule_osrc_members_str = str(rule_osrc_members) if len(str(rule_osrc_members)) <= 38 else str(rule_osrc_members)[:38]
rule_tsrc_members = find_members(policy_objects, rule['translated-source'], member_list=[], mode='ip')
rule_tsrc_members_str = str(rule_tsrc_members) if len(str(rule_tsrc_members)) <= 38 else str(rule_tsrc_members)[:38]
rule_odst_members = find_members(policy_objects, rule['original-destination'], member_list=[], mode='ip')
rule_odst_members_str = str(rule_odst_members) if len(str(rule_odst_members)) <= 38 else str(rule_odst_members)[:38]
rule_tdst_members = find_members(policy_objects, rule['translated-destination'], member_list=[], mode='ip')
rule_tdst_members_str = str(rule_tdst_members) if len(str(rule_tdst_members)) <= 38 else str(rule_tdst_members)[:38]
rule_osvc_members = find_members(policy_objects, rule['original-service'], member_list=[], mode='svc')
rule_osvc_members_str = str(rule_osvc_members) if len(str(rule_osvc_members)) <= 38 else str(rule_osvc_members)[:38]
rule_tsvc_members = find_members(policy_objects, rule['translated-service'], member_list=[], mode='svc')
rule_tsvc_members_str = str(rule_tsvc_members) if len(str(rule_tsvc_members)) <= 38 else str(rule_tsvc_members)[:38]
# Print
print('{0:<5}{1:<20}{2:<20}{3:<20}{4:<20}{5:<20}{6:<20}'.format(rule_id, rule_osrc_members_str, rule_tsrc_members_str, rule_odst_members_str, rule_tdst_members_str, rule_osvc_members_str, rule_tsvc_members_str), file=sys.stderr)
else:
if log_level >= 7:
print('DEBUG: ignoring rule of type', rule['type'])
else:
print('ERROR: Rule is not a dictionary or does not contain a type key:', str(rule))
if log_level >= 7:
print('DEBUG: Access layers found:', file=sys.stderr)
inspect_access_layers(access_layers)
# Other types of layers (not required)
# if log_level >= 7:
# print('DEBUG: Threat layers found:')
# inspect_access_layers(threat_layers)
# if log_level >= 7:
# print('DEBUG: NAT layer found:')
# inspect_nat_layers(nat_layers)
# Remove ipgroups that contain FQDNs
ipgroups_copy = ipgroups.copy()
for ipgroup in ipgroups_copy:
for x in ipgroup['members']:
if is_fqdn(x):
if log_level >= 7:
print('DEBUG: Removing IP group', ipgroup['name'], 'because it contains FQDN', x, '(IP Groups can only contain IP addresses)', file=sys.stderr)
ipgroups.remove(ipgroup)
break
if log_level >= 6:
print('INFO: {0} out of {1} IP Groups remain after removing FQDNs'.format(len(ipgroups), len(ipgroups_copy)), file=sys.stderr)
# Show ipgroups
ipgroups = sorted(ipgroups, key=lambda d: d['member_count'], reverse=True)
if log_level >= 6:
print('INFO: {0} IP groups found, capping them to the top {1}'.format(len(ipgroups), args.max_ipgroups), file=sys.stderr)
ipgroups = ipgroups[:args.max_ipgroups]
if log_level >= 8:
print('{0:<50}{1:<38}{2:<5}{3:<80}'.format('IP group name', 'CHKP ID', 'Count', 'IP addresses'), file=sys.stderr)
for ipgroup in ipgroups:
ipgroup_members = str(ipgroup['members']) if len(str(ipgroup['members'])) <= 80 else str(ipgroup['members'])[:80]
print('{0:<50}{1:<38}{2:<5}{3:<50}'.format(ipgroup['name'], ipgroup['id'], str(ipgroup['member_count']), ipgroup_members), file=sys.stderr)
# Check whether any IP group is repeated
if len(list(set([x['id'] for x in ipgroups]))) != len(ipgroups):
if log_level >= 4:
print('ERROR: IP groups with repeated IDs found', file=sys.stderr)
if len(list(set([x['name'] for x in ipgroups]))) != len(ipgroups):
if log_level >= 4:
print('ERROR: IP groups with repeated names found', file=sys.stderr)
# Process rules
process_access_layers(access_layers, ipgroups)
if log_level >= 6:
print('INFO: {0} network rules found, spread across {1} rule collections ({2} allow rules, {3} deny rules)'.format(sum([len(x['rules']) for x in az_net_rcs]), len(az_net_rcs), sum([len(x['rules']) for x in az_net_rcs if x['action'] == 'Accept']), sum([len(x['rules']) for x in az_net_rcs if x['action'] == 'Drop'])), file=sys.stderr)
# Now we should have all rules stored as network rule collections. Check whether any can be transformed in an application rule
# App rules need to go into their own rule collections
def create_app_rules(net_rcs):
last_action = None
app_rcs = []
# Loop through a copy of the rules (you cannot change a list while looping through it)
net_rcs_copy = net_rcs.copy()
for net_rc in net_rcs_copy:
        for net_rule in list(net_rc['rules']):  # iterate over a snapshot, since rules may be removed below
# Check whether the rule is for ports 80/443, and whether the target is a FQDN
if set(net_rule['destinationPorts']) in ({'80', '443'}, {'80'}, {'443'}) and len(net_rule['destinationFqdns']) > 0:
if log_level >= 7:
print('DEBUG: Transforming rule', net_rule['name'], 'to an application rule', file=sys.stderr)
if net_rc['action'] != last_action:
rule_collection = {
'name': rc_app_name + '-' + net_rc['action'] + '-' + str(len(az_app_rcs)),
'action': net_rc['action'],
'rules': []
}
# Append the rule collection to the list of rule collections and set last_action to the new value
app_rcs.append(rule_collection)
last_action = net_rc['action']
# Remove the rule from net_rules
net_rc['rules'].remove(net_rule)
# Change the rule type
net_rule['ruleType'] = 'applicationRule'
# Change the ipProtocols/destinationPorts
net_rule.pop('ipProtocols')
net_rule['protocols'] = []
if '80' in net_rule['destinationPorts']:
net_rule['protocols'].append({'protocolType': 'Http', 'port': 80})
if '443' in net_rule['destinationPorts']:
net_rule['protocols'].append({'protocolType': 'Https', 'port': 443})
net_rule['terminateTls'] = False
net_rule.pop('destinationPorts')
# Set some app rule attributes
net_rule['targetFqdns'] = net_rule['destinationFqdns']
net_rule.pop('destinationFqdns')
net_rule['targetUrls'] = []
net_rule['webCategories'] = []
net_rule['fqdnTags'] = []
# Add the rule to the last app rule collection
app_rcs[-1]['rules'].append(net_rule)
# Finished
return net_rcs, app_rcs
# Inspect both allow and deny network rules for candidates to transform into application rules
if args.use_apprules:
if log_level >= 7:
print('DEBUG: Checking whether any network rule can be transformed to an application rule', file=sys.stderr)
# az_net_rules_allow, az_app_rules_allow = create_app_rules(az_net_rules_allow, az_app_rules_allow)
# az_net_rules_deny, az_app_rules_deny = create_app_rules(az_net_rules_deny, az_app_rules_deny)
az_net_rcs, az_app_rcs = create_app_rules(az_net_rcs)
##########
# Output #
##########
# Generate JSON would be creating an object and serialize it
if args.output == "json":
api_version = "2021-08-01"
azfw_policy_name = args.policy_name
arm_template = {
'$schema': 'https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#',
'contentVersion': '1.0.0.0',
'parameters': {},
'variables': {
'location': '[resourceGroup().location]'
},
'resources': []
}
if not args.dont_create_policy:
resource_policy = {
'type': 'Microsoft.Network/firewallPolicies',
'apiVersion': api_version,
'name': azfw_policy_name,
'location': '[variables(\'location\')]',
'properties': {
'sku': {
'tier': args.policy_sku
},
'dnsSettings': {
'enableProxy': 'true'
},
'threatIntelMode': 'Alert'
}
}
arm_template['resources'].append(resource_policy)
resource_rcg = {
'type': 'Microsoft.Network/firewallPolicies/ruleCollectionGroups',
'apiVersion': api_version,
'name': azfw_policy_name + '/' + rcg_name,
'dependsOn': [],
'location': '[variables(\'location\')]',
'properties': {
'priority': rcg_prio,
'ruleCollections': []
}
}
if not args.dont_create_policy:
        resource_rcg['dependsOn'].append('[resourceId(\'Microsoft.Network/firewallPolicies\', \'' + azfw_policy_name + '\')]')
if args.use_ipgroups:
for ip_grp in ipgroups:
resource_ipgroup = {
'type': 'Microsoft.Network/ipGroups',
'apiVersion': api_version,
'name': format_to_arm_name(ip_grp['name']),
'location': '[variables(\'location\')]',
'properties': {
'ipAddresses': ip_grp['members']
}
}
arm_template['resources'].append(resource_ipgroup)
resource_rcg['dependsOn'].append("[resourceId('Microsoft.Network/ipGroups', '{0}')]".format(format_to_arm_name(ip_grp['name'])))
# Add network rule collections to the template
rc_net_prio = int(rc_net_prio_start)
for net_rc in az_net_rcs:
resource_rcg['properties']['ruleCollections'].append({
'ruleCollectionType': 'FirewallPolicyFilterRuleCollection',
'name': net_rc['name'],
'priority': str(rc_net_prio),
'action': {
'type': 'deny' if net_rc['action'] == 'Drop' else 'allow'
},
'rules': net_rc['rules']
})
rc_net_prio += 10
# Add application rule collections to the template
rc_app_prio = int(rc_app_prio_start)
for app_rc in az_app_rcs:
resource_rcg['properties']['ruleCollections'].append({
'ruleCollectionType': 'FirewallPolicyFilterRuleCollection',
'name': app_rc['name'],
'priority': str(rc_app_prio),
'action': {
'type': 'deny' if app_rc['action'] == 'Drop' else 'allow'
},
'rules': app_rc['rules']
})
rc_app_prio += 10
# if len(az_net_rules_allow) > 0:
# resource_rcg['properties']['ruleCollections'].append(resource_net_rc_allow)
# if len(az_net_rules_deny) > 0:
# resource_rcg['properties']['ruleCollections'].append(resource_net_rc_deny)
# if len(az_app_rules_allow) > 0:
# resource_rcg['properties']['ruleCollections'].append(resource_app_rc_allow)
# if len(az_app_rules_deny) > 0:
# resource_rcg['properties']['ruleCollections'].append(resource_app_rc_deny)
arm_template['resources'].append(resource_rcg)
if args.pretty:
print(json.dumps(arm_template, indent=4, sort_keys=True))
else:
print(json.dumps(arm_template))
elif args.output == "none":
if log_level >= 6:
print('INFO: No output type selected', file=sys.stderr)
else:
if log_level >= 3:
print ("ERROR: Output type", args.output, "not recognized!", file=sys.stderr)
# Last info message
if log_level >= 6:
print('INFO: Summary:', file=sys.stderr)
    print('INFO: {0} Checkpoint rules analyzed'.format(str(cnt_chkp_rules)), file=sys.stderr)
print('INFO: {0} Azure Firewall network rules, spread across {1} rule collections ({2} allow rules, {3} deny rules)'.format(sum([len(x['rules']) for x in az_net_rcs]), len(az_net_rcs), sum([len(x['rules']) for x in az_net_rcs if x['action'] == 'Accept']), sum([len(x['rules']) for x in az_net_rcs if x['action'] == 'Drop'])), file=sys.stderr)
print('INFO: {0} Azure Firewall application rules, spread across {1} rule collections ({2} allow rules, {3} deny rules)'.format(sum([len(x['rules']) for x in az_app_rcs]), len(az_app_rcs), sum([len(x['rules']) for x in az_app_rcs if x['action'] == 'Accept']), sum([len(x['rules']) for x in az_app_rcs if x['action'] == 'Drop'])), file=sys.stderr)
print('INFO: {0} Checkpoint discarded rules:'.format(len(discarded_rules)), file=sys.stderr)
print_access_layer_rule(access_layers, discarded_rules, debug=True)
| [
"[email protected]"
]
| |
1f9f4decc5db879cfc598fe5c9b819fbed4f43a3 | b79bce0cf363d2b6dd11371d378d78d48e973270 | /kashgari/tasks/classification/base_model.py | 7000ff01695ba716d958546221c048d0d0394381 | [
"Apache-2.0"
]
| permissive | CharlotteSean/Kashgari | 2d9338761b16d9804fb81ff92ce2ab1d256c80a7 | ab9970ecf6c0164416bfbbec1378c690b0f00d76 | refs/heads/master | 2022-01-22T03:52:12.284458 | 2019-07-17T03:48:04 | 2019-07-17T03:48:04 | 197,900,673 | 2 | 0 | Apache-2.0 | 2019-07-20T08:15:03 | 2019-07-20T08:15:03 | null | UTF-8 | Python | false | false | 8,165 | py | # encoding: utf-8
# author: BrikerMan
# contact: [email protected]
# blog: https://eliyar.biz
# file: base_classification_model.py
# time: 2019-05-22 11:23
import random
import logging
import kashgari
from typing import Dict, Any, Tuple, Optional, List
from kashgari.tasks.base_model import BaseModel, BareEmbedding
from kashgari.embeddings.base_embedding import Embedding
from sklearn import metrics
class BaseClassificationModel(BaseModel):
__task__ = 'classification'
def __init__(self,
embedding: Optional[Embedding] = None,
hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
super(BaseClassificationModel, self).__init__(embedding, hyper_parameters)
if hyper_parameters is None and \
self.embedding.processor.__getattribute__('multi_label') is True:
last_layer_name = list(self.hyper_parameters.keys())[-1]
self.hyper_parameters[last_layer_name]['activation'] = 'sigmoid'
logging.warning("Activation Layer's activate function changed to sigmoid for"
" multi-label classification question")
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def build_model_arc(self):
raise NotImplementedError
def compile_model(self, **kwargs):
if kwargs.get('loss') is None and self.embedding.processor.multi_label:
kwargs['loss'] = 'binary_crossentropy'
super(BaseClassificationModel, self).compile_model(**kwargs)
def predict(self,
x_data,
batch_size=32,
multi_label_threshold: float = 0.5,
debug_info=False,
predict_kwargs: Dict = None):
"""
Generates output predictions for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
multi_label_threshold:
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
"""
        if predict_kwargs is None:
            predict_kwargs = {}
        with kashgari.utils.custom_object_scope():
            tensor = self.embedding.process_x_dataset(x_data)
            pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
if self.embedding.processor.multi_label:
if debug_info:
logging.info('raw output: {}'.format(pred))
pred[pred >= multi_label_threshold] = 1
pred[pred < multi_label_threshold] = 0
else:
pred = pred.argmax(-1)
res = self.embedding.reverse_numerize_label_sequences(pred)
if debug_info:
logging.info('input: {}'.format(tensor))
logging.info('output: {}'.format(pred))
logging.info('output argmax: {}'.format(pred.argmax(-1)))
return res
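    # Illustrative usage (assumed model and data, not part of the original file):
    #   model.predict([['this', 'movie', 'was', 'great']])  ->  ['positive']
    # i.e. x_data is a list of tokenized samples and the return value is the list of predicted labels.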
def predict_top_k_class(self,
x_data,
top_k=5,
batch_size=32,
debug_info=False,
predict_kwargs: Dict = None) -> List[Dict]:
"""
Generates output predictions with confidence for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
top_k: int
batch_size: Integer. If unspecified, it will default to 32.
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
single-label classification:
[
{
"label": "chat",
"confidence": 0.5801531,
"candidates": [
{ "label": "cookbook", "confidence": 0.1886314 },
{ "label": "video", "confidence": 0.13805099 },
{ "label": "health", "confidence": 0.013852648 },
{ "label": "translation", "confidence": 0.012913573 }
]
}
]
multi-label classification:
[
{
"candidates": [
{ "confidence": 0.9959336, "label": "toxic" },
{ "confidence": 0.9358089, "label": "obscene" },
{ "confidence": 0.6882098, "label": "insult" },
{ "confidence": 0.13540423, "label": "severe_toxic" },
{ "confidence": 0.017219543, "label": "identity_hate" }
]
}
]
"""
if predict_kwargs is None:
predict_kwargs = {}
with kashgari.utils.custom_object_scope():
tensor = self.embedding.process_x_dataset(x_data)
pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
new_results = []
for sample_prob in pred:
sample_res = zip(self.label2idx.keys(), sample_prob)
sample_res = sorted(sample_res, key=lambda k: k[1], reverse=True)
data = {}
for label, confidence in sample_res[:top_k]:
if 'candidates' not in data:
if self.embedding.processor.multi_label:
data['candidates'] = []
else:
data['label'] = label
data['confidence'] = confidence
data['candidates'] = []
continue
data['candidates'].append({
'label': label,
'confidence': confidence
})
new_results.append(data)
if debug_info:
logging.info('input: {}'.format(tensor))
logging.info('output: {}'.format(pred))
logging.info('output argmax: {}'.format(pred.argmax(-1)))
return new_results
def evaluate(self,
x_data,
y_data,
batch_size=None,
digits=4,
output_dict=False,
debug_info=False) -> Optional[Tuple[float, float, Dict]]:
y_pred = self.predict(x_data, batch_size=batch_size)
if debug_info:
for index in random.sample(list(range(len(x_data))), 5):
logging.debug('------ sample {} ------'.format(index))
logging.debug('x : {}'.format(x_data[index]))
logging.debug('y : {}'.format(y_data[index]))
logging.debug('y_pred : {}'.format(y_pred[index]))
if self.pre_processor.multi_label:
y_pred_b = self.pre_processor.multi_label_binarizer.fit_transform(y_pred)
y_true_b = self.pre_processor.multi_label_binarizer.fit_transform(y_data)
            report = metrics.classification_report(y_true_b,
                                                   y_pred_b,
target_names=self.pre_processor.multi_label_binarizer.classes_,
output_dict=output_dict,
digits=digits)
else:
report = metrics.classification_report(y_data,
y_pred,
output_dict=output_dict,
digits=digits)
if not output_dict:
print(report)
else:
return report
if __name__ == "__main__":
print("Hello world")
| [
"[email protected]"
]
| |
ea87058f0b89ef2986d354a4d11d9415fbf9a678 | f5ad59e9ebdd65e00b8a9c1a740bb6eef878fe99 | /utils/augmentations.py | a7ef530c764d19f4b9771ac5b8ccbda6a16f0894 | [
"MIT"
]
| permissive | xiaobingchan/ssd.pytorch | 8c506c7cf6cc8e29cd548b9c561a7b1bf258a4ae | 31df8460f49cfb59d545500af7d92d36e7d643b3 | refs/heads/master | 2023-03-22T02:13:04.155628 | 2021-03-13T03:10:15 | 2021-03-13T03:10:15 | 347,259,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,485 | py | # -*- coding: utf-8 -*-
import torch
from torchvision import transforms
import cv2
import numpy as np
import types
from numpy import random
def intersect(box_a, box_b):
max_xy = np.minimum(box_a[:, 2:], box_b[2:])
min_xy = np.maximum(box_a[:, :2], box_b[:2])
inter = np.clip((max_xy - min_xy), a_min=0, a_max=np.inf)
return inter[:, 0] * inter[:, 1]
def jaccard_numpy(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: Multiple bounding boxes, Shape: [num_boxes,4]
box_b: Single bounding box, Shape: [4]
Return:
jaccard overlap: Shape: [box_a.shape[0], box_a.shape[1]]
"""
inter = intersect(box_a, box_b)
area_a = ((box_a[:, 2]-box_a[:, 0]) *
(box_a[:, 3]-box_a[:, 1])) # [A,B]
area_b = ((box_b[2]-box_b[0]) *
(box_b[3]-box_b[1])) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
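# Illustrative worked example for the helpers above (values chosen for clarity, not from the original file):
#   box_a = np.array([[0., 0., 10., 10.]])   # area 100
#   box_b = np.array([5., 5., 15., 15.])     # area 100
#   intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175
#   jaccard_numpy(box_a, box_b) -> array([0.14285714])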
class Compose(object):
"""Composes several augmentations together.
Args:
transforms (List[Transform]): list of transforms to compose.
Example:
>>> augmentations.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, boxes=None, labels=None):
for t in self.transforms:
img, boxes, labels = t(img, boxes, labels)
return img, boxes, labels
class Lambda(object):
"""Applies a lambda as a transform."""
def __init__(self, lambd):
assert isinstance(lambd, types.LambdaType)
self.lambd = lambd
def __call__(self, img, boxes=None, labels=None):
return self.lambd(img, boxes, labels)
class ConvertFromInts(object):
def __call__(self, image, boxes=None, labels=None):
return image.astype(np.float32), boxes, labels
class SubtractMeans(object):
def __init__(self, mean):
self.mean = np.array(mean, dtype=np.float32)
def __call__(self, image, boxes=None, labels=None):
image = image.astype(np.float32)
image -= self.mean
return image.astype(np.float32), boxes, labels
class ToAbsoluteCoords(object):
def __call__(self, image, boxes=None, labels=None):
height, width, channels = image.shape
boxes[:, 0] *= width
boxes[:, 2] *= width
boxes[:, 1] *= height
boxes[:, 3] *= height
return image, boxes, labels
class ToPercentCoords(object):
def __call__(self, image, boxes=None, labels=None):
height, width, channels = image.shape
boxes[:, 0] /= width
boxes[:, 2] /= width
boxes[:, 1] /= height
boxes[:, 3] /= height
return image, boxes, labels
class Resize(object):
def __init__(self, size=300):
self.size = size
def __call__(self, image, boxes=None, labels=None):
image = cv2.resize(image, (self.size,
self.size))
return image, boxes, labels
class RandomSaturation(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "contrast upper must be >= lower."
assert self.lower >= 0, "contrast lower must be non-negative."
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
image[:, :, 1] *= random.uniform(self.lower, self.upper)
return image, boxes, labels
class RandomHue(object):
def __init__(self, delta=18.0):
assert delta >= 0.0 and delta <= 360.0
self.delta = delta
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
image[:, :, 0] += random.uniform(-self.delta, self.delta)
image[:, :, 0][image[:, :, 0] > 360.0] -= 360.0
image[:, :, 0][image[:, :, 0] < 0.0] += 360.0
return image, boxes, labels
class RandomLightingNoise(object):
def __init__(self):
self.perms = ((0, 1, 2), (0, 2, 1),
(1, 0, 2), (1, 2, 0),
(2, 0, 1), (2, 1, 0))
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
swap = self.perms[random.randint(len(self.perms))]
shuffle = SwapChannels(swap) # shuffle channels
image = shuffle(image)
return image, boxes, labels
class ConvertColor(object):
def __init__(self, current='BGR', transform='HSV'):
self.transform = transform
self.current = current
def __call__(self, image, boxes=None, labels=None):
if self.current == 'BGR' and self.transform == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
elif self.current == 'HSV' and self.transform == 'BGR':
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
else:
raise NotImplementedError
return image, boxes, labels
class RandomContrast(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "contrast upper must be >= lower."
assert self.lower >= 0, "contrast lower must be non-negative."
# expects float image
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
alpha = random.uniform(self.lower, self.upper)
image *= alpha
return image, boxes, labels
class RandomBrightness(object):
def __init__(self, delta=32):
assert delta >= 0.0
assert delta <= 255.0
self.delta = delta
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
delta = random.uniform(-self.delta, self.delta)
image += delta
return image, boxes, labels
class ToCV2Image(object):
def __call__(self, tensor, boxes=None, labels=None):
return tensor.cpu().numpy().astype(np.float32).transpose((1, 2, 0)), boxes, labels
class ToTensor(object):
def __call__(self, cvimage, boxes=None, labels=None):
return torch.from_numpy(cvimage.astype(np.float32)).permute(2, 0, 1), boxes, labels
class RandomSampleCrop(object):
"""Crop
Arguments:
img (Image): the image being input during training
boxes (Tensor): the original bounding boxes in pt form
labels (Tensor): the class labels for each bbox
mode (float tuple): the min and max jaccard overlaps
Return:
(img, boxes, classes)
img (Image): the cropped image
boxes (Tensor): the adjusted bounding boxes in pt form
labels (Tensor): the class labels for each bbox
"""
def __init__(self):
self.sample_options = (
# using entire original input image
None,
            # sample a patch s.t. MIN jaccard w/ obj in .1,.3,.7,.9
(0.1, None),
(0.3, None),
(0.7, None),
(0.9, None),
# randomly sample a patch
(None, None),
)
def __call__(self, image, boxes=None, labels=None):
height, width, _ = image.shape
while True:
# randomly choose a mode
mode = random.choice(self.sample_options)
if mode is None:
return image, boxes, labels
min_iou, max_iou = mode
if min_iou is None:
min_iou = float('-inf')
if max_iou is None:
max_iou = float('inf')
            # max trials (50)
for _ in range(50):
current_image = image
w = random.uniform(0.3 * width, width)
h = random.uniform(0.3 * height, height)
# aspect ratio constraint b/t .5 & 2
if h / w < 0.5 or h / w > 2:
continue
left = random.uniform(width - w)
top = random.uniform(height - h)
# convert to integer rect x1,y1,x2,y2
rect = np.array([int(left), int(top), int(left+w), int(top+h)])
# calculate IoU (jaccard overlap) b/t the cropped and gt boxes
overlap = jaccard_numpy(boxes, rect)
# is min and max overlap constraint satisfied? if not try again
if overlap.min() < min_iou and max_iou < overlap.max():
continue
# cut the crop from the image
current_image = current_image[rect[1]:rect[3], rect[0]:rect[2],
:]
# keep overlap with gt box IF center in sampled patch
centers = (boxes[:, :2] + boxes[:, 2:]) / 2.0
# mask in all gt boxes that above and to the left of centers
m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])
# mask in all gt boxes that under and to the right of centers
m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])
# mask in that both m1 and m2 are true
mask = m1 * m2
# have any valid boxes? try again if not
if not mask.any():
continue
# take only matching gt boxes
current_boxes = boxes[mask, :].copy()
# take only matching gt labels
current_labels = labels[mask]
# should we use the box left and top corner or the crop's
current_boxes[:, :2] = np.maximum(current_boxes[:, :2],
rect[:2])
# adjust to crop (by substracting crop's left,top)
current_boxes[:, :2] -= rect[:2]
current_boxes[:, 2:] = np.minimum(current_boxes[:, 2:],
rect[2:])
# adjust to crop (by substracting crop's left,top)
current_boxes[:, 2:] -= rect[:2]
return current_image, current_boxes, current_labels
class Expand(object):
def __init__(self, mean):
self.mean = mean
def __call__(self, image, boxes, labels):
if random.randint(2):
return image, boxes, labels
height, width, depth = image.shape
ratio = random.uniform(1, 4)
left = random.uniform(0, width*ratio - width)
top = random.uniform(0, height*ratio - height)
expand_image = np.zeros(
(int(height*ratio), int(width*ratio), depth),
dtype=image.dtype)
expand_image[:, :, :] = self.mean
expand_image[int(top):int(top + height),
int(left):int(left + width)] = image
image = expand_image
boxes = boxes.copy()
boxes[:, :2] += (int(left), int(top))
boxes[:, 2:] += (int(left), int(top))
return image, boxes, labels
class RandomMirror(object):
def __call__(self, image, boxes, classes):
_, width, _ = image.shape
if random.randint(2):
image = image[:, ::-1]
boxes = boxes.copy()
boxes[:, 0::2] = width - boxes[:, 2::-2]
return image, boxes, classes
class SwapChannels(object):
"""Transforms a tensorized image by swapping the channels in the order
specified in the swap tuple.
Args:
swaps (int triple): final order of channels
eg: (2, 1, 0)
"""
def __init__(self, swaps):
self.swaps = swaps
def __call__(self, image):
"""
Args:
image (Tensor): image tensor to be transformed
Return:
a tensor with channels swapped according to swap
"""
# if torch.is_tensor(image):
# image = image.data.cpu().numpy()
# else:
# image = np.array(image)
image = image[:, :, self.swaps]
return image
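# Illustrative usage (added example): SwapChannels((2, 1, 0)) reverses the channel order of an
# HxWxC image, e.g. turning an OpenCV BGR image into RGB:
#   rgb_image = SwapChannels((2, 1, 0))(bgr_image)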
class PhotometricDistort(object):
def __init__(self):
self.pd = [
RandomContrast(),
ConvertColor(transform='HSV'),
RandomSaturation(),
RandomHue(),
ConvertColor(current='HSV', transform='BGR'),
RandomContrast()
]
self.rand_brightness = RandomBrightness()
self.rand_light_noise = RandomLightingNoise()
def __call__(self, image, boxes, labels):
im = image.copy()
im, boxes, labels = self.rand_brightness(im, boxes, labels)
if random.randint(2):
distort = Compose(self.pd[:-1])
else:
distort = Compose(self.pd[1:])
im, boxes, labels = distort(im, boxes, labels)
return self.rand_light_noise(im, boxes, labels)
class SSDAugmentation(object):
def __init__(self, size=300, mean=(104, 117, 123)):
self.mean = mean
self.size = size
self.augment = Compose([
ConvertFromInts(),
ToAbsoluteCoords(),
PhotometricDistort(),
Expand(self.mean),
RandomSampleCrop(),
RandomMirror(),
ToPercentCoords(),
Resize(self.size),
SubtractMeans(self.mean)
])
def __call__(self, img, boxes, labels):
return self.augment(img, boxes, labels)
| [
"[email protected]"
]
| |
02650d3908ac10b7a8f2d51dad07cac1493b5748 | 2eb779146daa0ba6b71344ecfeaeaec56200e890 | /python/oneflow/compatible/single_client/ops/layers.py | 1f64355013b6e7f7ad79cbd2282b85e7c29c16d5 | [
"Apache-2.0"
]
| permissive | hxfxjun/oneflow | ee226676cb86f3d36710c79cb66c2b049c46589b | 2427c20f05543543026ac9a4020e479b9ec0aeb8 | refs/heads/master | 2023-08-17T19:30:59.791766 | 2021-10-09T06:58:33 | 2021-10-09T06:58:33 | 414,906,649 | 0 | 0 | Apache-2.0 | 2021-10-09T06:15:30 | 2021-10-08T08:29:45 | C++ | UTF-8 | Python | false | false | 63,471 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Callable, Optional, Sequence, Tuple, Union
import oneflow._oneflow_internal
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.framework import distribute as distribute_util
from oneflow.compatible.single_client.framework import remote_blob as remote_blob_util
from oneflow.core.job import initializer_conf_pb2 as initializer_conf_util
from oneflow.core.job import regularizer_conf_pb2 as regularizer_conf_util
from oneflow.core.operator import op_conf_pb2 as op_conf_util
IntPair = Tuple[int, int]
def dense(
inputs: oneflow._oneflow_internal.BlobDesc,
units: int,
activation: Optional[
Callable[
[oneflow._oneflow_internal.BlobDesc, str],
oneflow._oneflow_internal.BlobDesc,
]
] = None,
use_bias: bool = True,
kernel_initializer: Optional[initializer_conf_util.InitializerConf] = None,
bias_initializer: Optional[initializer_conf_util.InitializerConf] = None,
kernel_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
bias_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
trainable: bool = True,
name: str = "Dense",
model_distribute: oneflow._oneflow_internal.distribute.Distribute = oneflow._oneflow_internal.distribute.broadcast(),
) -> oneflow._oneflow_internal.BlobDesc:
"""Fully-connected layer.
The fully-connected layer multiplies input Blob with weight matrix and produces an Output Blob.
Args:
inputs (oneflow._oneflow_internal.BlobDesc): A 2D input `Blob`.
units (int): A positive integer for the dimensionality of the output space.
activation (Optional[oneflow._oneflow_internal.BlobDesc], optional): Activation function. Defaults to None.
use_bias (bool, optional): A boolean specifies whether to use a bias vector. Defaults to True.
kernel_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the kernel weights matrix. Defaults to None.
bias_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the bias vector. Defaults to None.
kernel_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer function applied to the kernel weights matrix. Defaults to None.
bias_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the bias vector. Defaults to None.
trainable (bool, optional): A boolean specifies whether to train the variables. Defaults to True.
name (Optional[str], optional): This layer's name. Defaults to None.
        model_distribute (oneflow._oneflow_internal.distribute.Distribute, optional): Define the way to distribute the model. Defaults to oneflow._oneflow_internal.distribute.broadcast().
Returns:
        oneflow._oneflow_internal.BlobDesc: An N-D `Blob` whose last dimension is `units`; for a 2D input this is (batch_size, units).
Raises:
        ValueError: The dimension of the input `Blob` is less than 2.
        ValueError: Model distribute must be one of auto, broadcast, split.
ValueError: The input must be a 2D `Blob` when the model distribute is split.
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def dense_Job(x: tp.Numpy.Placeholder((1, 256))
) -> tp.Numpy:
initializer = flow.truncated_normal(0.1)
hidden = flow.layers.dense(
x,
512,
activation=flow.nn.relu,
kernel_initializer=initializer,
name="dense1",
)
return hidden
x = np.random.randn(1, 256).astype(np.float32)
out = dense_Job(x)
# out.shape (1, 512)
"""
in_shape = inputs.shape
in_num_axes = len(in_shape)
assert in_num_axes >= 2
assert (
model_distribute is oneflow._oneflow_internal.distribute.auto()
or model_distribute is oneflow._oneflow_internal.distribute.broadcast()
or model_distribute is oneflow._oneflow_internal.distribute.split(0)
)
if model_distribute is oneflow._oneflow_internal.distribute.split(0):
assert in_num_axes == 2
if in_num_axes > 2:
inputs = flow.reshape(inputs, (-1, in_shape[-1]))
with flow.scope.namespace(name):
if kernel_initializer is None:
kernel_initializer = flow.constant_initializer(0)
weight = flow.get_variable(
name="weight",
shape=(units, inputs.shape[1]),
dtype=inputs.dtype,
initializer=kernel_initializer,
regularizer=kernel_regularizer,
trainable=trainable,
model_name="weight",
distribute=model_distribute,
reuse=False,
)
weight = weight.with_distribute(model_distribute)
out = flow.matmul(a=inputs, b=weight, transpose_b=True, name="matmul")
if use_bias:
if bias_initializer is None:
bias_initializer = flow.constant_initializer(0)
bias = flow.get_variable(
name="bias",
shape=(units,),
dtype=inputs.dtype,
initializer=bias_initializer,
regularizer=bias_regularizer,
trainable=trainable,
model_name="bias",
distribute=model_distribute,
reuse=False,
)
bias = bias.with_distribute(model_distribute)
out = flow.nn.bias_add(out, bias, name="bias_add")
if callable(activation):
out = activation(out, name="activation")
if in_num_axes > 2:
out = flow.reshape(out, in_shape[:-1] + (units,))
return out
def conv1d(
inputs: oneflow._oneflow_internal.BlobDesc,
filters: int,
kernel_size: Union[int, Tuple[int]] = 1,
strides: Union[int, Tuple[int]] = 1,
padding: Union[str, Tuple[IntPair, IntPair, IntPair]] = "VALID",
data_format: str = "NCW",
dilation_rate: Optional[Union[int, Tuple[int]]] = None,
groups: int = 1,
activation: Optional[
Callable[
[oneflow._oneflow_internal.BlobDesc, str],
oneflow._oneflow_internal.BlobDesc,
]
] = None,
use_bias: bool = True,
kernel_initializer: Optional[initializer_conf_util.InitializerConf] = None,
bias_initializer: Optional[initializer_conf_util.InitializerConf] = None,
kernel_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
bias_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
trainable: bool = True,
name: str = "Conv1d",
weight_name: Optional[str] = None,
bias_name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
"""1D convolution layer.
This layer computes a 1-D convolution with 3D input Blob and filters.
Args:
inputs (oneflow._oneflow_internal.BlobDesc): A 3D input `Blob`.
filters (int): An integer specifies the dimensionality of the output space.
kernel_size (Union[int, List[int], Tuple[int]], optional): An integer or tuple/list specifies the height and width of the convolution window.
When it is an integer, a square window is applied to the input. Defaults to 1.
strides (Union[int, List[int], Tuple[int]], optional): An integer or tuple/list specifies the strides of the convolution window along the height and width.
            When it is an integer, the same value for all spatial dimensions is applied. Defaults to 1.
padding (str, Tuple[IntPair, IntPair, IntPair], optional): padding: `string` `"SAME"` or `"SAME_LOWER"` or `"SAME_UPPER"` or `"VALID" or Tuple[IntPair, IntPair, IntPair]` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. Defaults to "VALID".
data_format (str, optional): A string specifies the format of the input `Blob`, one of "NCW" or "NWC" (default: "NCW"). "NCW" cooresponds to channels_first, i.e. the input `Blob` with shape (batch_size, channels, width).
"NWC" cooresponds to channels_last, i.e. the input `Blob` with shape (batch_size, channels, width). Defaults to "NCW".
dilation_rate (Optional[Union[int, Tuple[int]]], optional): An integer or tuple/list specifies the dilation rate for the dilated convolution. When it is an integer, the same dilation rate is applied for the all dimensions. Defaults to 1.
groups (int, optional): A positive integer specifies number of groups for the Group conv. Defaults to 1.
activation (Optional[ Callable[[oneflow._oneflow_internal.BlobDesc, str], oneflow._oneflow_internal.BlobDesc] ], optional): Activation function. Defaults to None.
use_bias (bool, optional): A boolean specifies whether to use a bias vector. Defaults to True.
kernel_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the kernel weights matrix. Defaults to None.
bias_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the bias vector. Defaults to None.
kernel_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the kernel weights matrix. Defaults to None.
        bias_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the bias vector. Defaults to None.
trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
name (Optional[str], optional): This layer's name. Defaults to None.
Raises:
ValueError: If the type of kernel_size is not one of integer, list, tuple.
ValueError: The number of groups must be positive and number of filters must be divisible by it.
ValueError: If data_format is not one of 'NCW', 'NWC'.
ValueError: If number of input channels is not divisible by number of groups or less than number of groups.
        ValueError: Number of groups must be one when data_format is 'NWC'.
Returns:
oneflow._oneflow_internal.BlobDesc: A 3D `Blob` with the shape of (batch_size, filters, new_width).
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def conv1d_Job(x: tp.Numpy.Placeholder((1, 64, 32))
) -> tp.Numpy:
initializer = flow.truncated_normal(0.1)
conv1d = flow.layers.conv1d(
x,
filters=128,
kernel_size=3,
strides=1,
padding='SAME',
kernel_initializer=initializer,
name="Conv1d"
)
return conv1d
x = np.random.randn(1, 64, 32).astype(np.float32)
out = conv1d_Job(x)
# out.shape (1, 128, 32)
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size,)
else:
assert isinstance(kernel_size, (list, tuple))
assert len(kernel_size) == 1
kernel_size = tuple(kernel_size)
assert isinstance(groups, int)
assert groups > 0
assert groups <= filters
assert filters % groups == 0
if data_format.upper() == "NCW":
assert groups <= inputs.shape[1]
assert inputs.shape[1] % groups == 0
weight_shape = (filters, inputs.shape[1] // groups) + kernel_size
elif data_format.upper() == "NWC":
assert groups == 1
assert groups <= inputs.shape[2]
assert inputs.shape[2] % groups == 0
weight_shape = (filters, kernel_size[0], inputs.shape[2] // groups)
else:
raise ValueError("data_format must be in NCW or NWC")
if kernel_initializer is None:
kernel_initializer = flow.xavier_uniform_initializer(data_format=data_format)
if weight_name is None:
with flow.scope.namespace(name):
weight = flow.get_variable(
name="weight",
shape=weight_shape,
dtype=inputs.dtype,
initializer=kernel_initializer,
regularizer=kernel_regularizer,
trainable=trainable,
model_name="weight",
reuse=False,
)
else:
weight = flow.get_variable(
name=weight_name,
shape=weight_shape,
dtype=inputs.dtype,
initializer=kernel_initializer,
regularizer=kernel_regularizer,
trainable=trainable,
model_name="weight",
reuse=False,
)
output = flow.nn.conv1d(
inputs,
weight,
strides,
padding,
data_format,
dilation_rate,
groups=groups,
name=name,
)
if use_bias:
if bias_initializer is None:
bias_initializer = flow.constant_initializer(0)
if bias_name is None:
with flow.scope.namespace(name):
bias = flow.get_variable(
name="bias",
shape=(filters,),
dtype=inputs.dtype,
initializer=bias_initializer,
regularizer=bias_regularizer,
trainable=trainable,
model_name="bias",
reuse=False,
)
else:
bias = flow.get_variable(
name=bias_name,
shape=(filters,),
dtype=inputs.dtype,
initializer=bias_initializer,
regularizer=bias_regularizer,
trainable=trainable,
model_name="bias",
reuse=False,
)
with flow.scope.namespace(name):
output = flow.nn.bias_add(output, bias, data_format, name="bias_add")
if callable(activation):
with flow.scope.namespace(name):
output = activation(output, name="activation")
return output
def conv2d(
inputs: oneflow._oneflow_internal.BlobDesc,
filters: int,
kernel_size: Union[int, IntPair] = 1,
strides: Union[int, IntPair] = 1,
padding: Union[str, Tuple[IntPair, IntPair, IntPair, IntPair]] = "VALID",
data_format: str = "NCHW",
dilation_rate: Optional[Union[int, IntPair]] = None,
groups: int = 1,
activation: Optional[
Callable[
[oneflow._oneflow_internal.BlobDesc, str],
oneflow._oneflow_internal.BlobDesc,
]
] = None,
use_bias: bool = True,
kernel_initializer: Optional[initializer_conf_util.InitializerConf] = None,
bias_initializer: Optional[initializer_conf_util.InitializerConf] = None,
kernel_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
bias_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
trainable: bool = True,
name: str = "Conv2d",
weight_name: Optional[str] = None,
bias_name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
"""2D convolution layer.
This layer computes a 2D convolution with 4D input Blob and filters.
Args:
inputs (oneflow._oneflow_internal.BlobDesc): A 4D input `Blob`.
filters (int): An integer specifies the dimensionality of the output space.
kernel_size (Union[int, List[int], Tuple[int]], optional): An integer or tuple/list specifies the height and width of the convolution window.
When it is an integer, a square window is applied to the input. Defaults to 1.
        strides (Union[int, List[int], Tuple[int]], optional): An integer or tuple/list specifies the strides of the convolution window along the height and width.
            When it is an integer, the same value is applied to all spatial dimensions. Defaults to 1.
        padding (str, Tuple[IntPair, IntPair, IntPair, IntPair], optional): `"SAME"`, `"SAME_LOWER"`, `"SAME_UPPER"` or `"VALID"` indicating the type of padding algorithm to use, or a Tuple[IntPair, IntPair, IntPair, IntPair] indicating the explicit paddings at the start and end of each dimension. Defaults to "VALID".
        data_format (str, optional): A string specifies the format of the input `Blob`, one of "NCHW" or "NHWC" (default: "NCHW"). "NCHW" corresponds to channels_first, i.e. the input `Blob` with shape (batch_size, channels, height, width).
            "NHWC" corresponds to channels_last, i.e. the input `Blob` with shape (batch_size, height, width, channels). Defaults to "NCHW".
        dilation_rate (int, optional): An integer or tuple/list specifies the dilation rate for the dilated convolution. When it is an integer, the same dilation rate is applied to all dimensions. Defaults to 1.
groups (int, optional): A positive integer specifies number of groups for the Group conv. Defaults to 1.
activation (Optional[ Callable[[oneflow._oneflow_internal.BlobDesc, str], oneflow._oneflow_internal.BlobDesc] ], optional): Activation function. Defaults to None.
use_bias (bool, optional): A boolean specifies whether to use a bias vector. Defaults to True.
kernel_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the kernel weights matrix. Defaults to None.
bias_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the bias vector. Defaults to None.
kernel_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the kernel weights matrix. Defaults to None.
        bias_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the bias vector. Defaults to None.
trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
name (Optional[str], optional): This layer's name. Defaults to None.
weight_name (Optional[str], optional): This weight's name. Defaults to None.
bias_name (Optional[str], optional): This bias's name. Defaults to None.
Raises:
ValueError: If the type of kernel_size is not one of integer, list, tuple.
ValueError: The number of groups must be positive and number of filters must be divisible by it.
ValueError: If data_format is not one of 'NCHW', 'NHWC'.
ValueError: If number of input channels is not divisible by number of groups or less than number of groups.
        ValueError: Number of groups must be one when data_format is 'NHWC'.
Returns:
oneflow._oneflow_internal.BlobDesc: A 4D `Blob` with the shape of (batch_size, filters, new_height, new_width).
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def conv2d_Job(x: tp.Numpy.Placeholder((1, 256, 32, 32))
) -> tp.Numpy:
initializer = flow.truncated_normal(0.1)
conv2d = flow.layers.conv2d(
x,
filters=128,
kernel_size=3,
strides=1,
padding='SAME',
kernel_initializer=initializer,
name="Conv2d"
)
return conv2d
x = np.random.randn(1, 256, 32, 32).astype(np.float32)
out = conv2d_Job(x)
# out.shape (1, 128, 32, 32)
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
else:
assert isinstance(kernel_size, (list, tuple))
assert len(kernel_size) == 2
kernel_size = tuple(kernel_size)
assert isinstance(groups, int)
assert groups > 0
assert groups <= filters
assert filters % groups == 0
if data_format.upper() == "NCHW":
assert groups <= inputs.shape[1]
assert inputs.shape[1] % groups == 0
weight_shape = (filters, inputs.shape[1] // groups) + kernel_size
elif data_format.upper() == "NHWC":
assert groups == 1
assert groups <= inputs.shape[3]
assert inputs.shape[3] % groups == 0
weight_shape = (
filters,
kernel_size[0],
kernel_size[1],
inputs.shape[3] // groups,
)
else:
raise ValueError("data_format must be in NCHW or NHWC")
if kernel_initializer is None:
kernel_initializer = flow.xavier_uniform_initializer(data_format=data_format)
if weight_name is None:
with flow.scope.namespace(name):
weight = flow.get_variable(
name="weight",
shape=weight_shape,
dtype=inputs.dtype,
initializer=kernel_initializer,
regularizer=kernel_regularizer,
trainable=trainable,
model_name="weight",
reuse=False,
)
else:
weight = flow.get_variable(
name=weight_name,
shape=weight_shape,
dtype=inputs.dtype,
initializer=kernel_initializer,
regularizer=kernel_regularizer,
trainable=trainable,
model_name="weight",
reuse=False,
)
output = flow.nn.conv2d(
inputs,
weight,
strides=strides,
padding=padding,
bias=None,
data_format=data_format,
dilations=dilation_rate,
groups=groups,
name=name,
)
if use_bias:
if bias_initializer is None:
bias_initializer = flow.constant_initializer(0)
if bias_name is None:
with flow.scope.namespace(name):
bias = flow.get_variable(
name="bias",
shape=(filters,),
dtype=inputs.dtype,
initializer=bias_initializer,
regularizer=bias_regularizer,
trainable=trainable,
model_name="bias",
reuse=False,
)
else:
bias = flow.get_variable(
name=bias_name,
shape=(filters,),
dtype=inputs.dtype,
initializer=bias_initializer,
regularizer=bias_regularizer,
trainable=trainable,
model_name="bias",
reuse=False,
)
with flow.scope.namespace(name):
output = flow.nn.bias_add(output, bias, data_format, name="bias_add")
if callable(activation):
with flow.scope.namespace(name):
output = activation(output, name="activation")
return output
def conv3d(
inputs: oneflow._oneflow_internal.BlobDesc,
filters: int,
kernel_size: Union[int, Sequence[int]] = 1,
strides: Union[int, Sequence[int]] = 1,
padding: Union[str, Tuple[IntPair, IntPair, IntPair, IntPair, IntPair]] = "VALID",
data_format: str = "NCDHW",
dilation_rate: Optional[Union[int, IntPair]] = None,
groups: int = 1,
activation: Optional[
Callable[
[oneflow._oneflow_internal.BlobDesc, str],
oneflow._oneflow_internal.BlobDesc,
]
] = None,
use_bias: bool = True,
kernel_initializer: Optional[initializer_conf_util.InitializerConf] = None,
bias_initializer: Optional[initializer_conf_util.InitializerConf] = None,
kernel_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
bias_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
trainable: bool = True,
name: str = "Conv3d",
weight_name: Optional[str] = None,
bias_name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
"""3D convolution layer.
This layer computes 3D convolution with 5D input Blob and filters
Args:
inputs (oneflow._oneflow_internal.BlobDesc): A 5D input `Blob`.
filters (int): An integer specifies the dimensionality of the output space.
        kernel_size (Union[int, List[int], Sequence[int]], optional): An integer or tuple/list specifies the depth, height and width of the convolution window.
            When it is an integer, a cubic window is applied to the input. Defaults to 1.
        strides (Union[int, List[int], Sequence[int]], optional): An integer or tuple/list specifies the strides of the convolution window along the depth, height and width.
            When it is an integer, the same value is applied to all spatial dimensions. Defaults to 1.
        padding (str, Tuple[IntPair, IntPair, IntPair, IntPair, IntPair], optional): `"SAME"`, `"SAME_LOWER"`, `"SAME_UPPER"` or `"VALID"` indicating the type of padding algorithm to use, or a Tuple[IntPair, IntPair, IntPair, IntPair, IntPair] indicating the explicit paddings at the start and end of each dimension. Defaults to "VALID".
        data_format (str, optional): A string specifies the format of the input `Blob`, one of "NCDHW" or "NDHWC" (default: "NCDHW"). "NCDHW" corresponds to channels_first, i.e. the input `Blob` with shape (batch_size, channels, depth, height, width).
            "NDHWC" corresponds to channels_last, i.e. the input `Blob` with shape (batch_size, depth, height, width, channels). Defaults to "NCDHW".
        dilation_rate (int, optional): An integer or tuple/list specifies the dilation rate for the dilated convolution. When it is an integer, the same dilation rate is applied to all dimensions. Defaults to 1.
groups (int, optional): A positive integer specifies number of groups for the Group conv. Defaults to 1.
activation (Optional[ Callable[[oneflow._oneflow_internal.BlobDesc, str], oneflow._oneflow_internal.BlobDesc] ], optional): Activation function. Defaults to None.
use_bias (bool, optional): A boolean specifies whether to use a bias vector. Defaults to True.
kernel_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the kernel weights matrix. Defaults to None.
bias_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the bias vector. Defaults to None.
kernel_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the kernel weights matrix. Defaults to None.
        bias_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the bias vector. Defaults to None.
trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
name (Optional[str], optional): This layer's name. Defaults to None.
weight_name (Optional[str], optional): This weight's name. Defaults to None.
bias_name (Optional[str], optional): This bias's name. Defaults to None.
Raises:
ValueError: If the type of kernel_size is not one of integer, list, tuple.
ValueError: The number of groups must be positive and number of filters must be divisible by it.
ValueError: If data_format is not one of 'NCDHW', 'NDHWC'.
ValueError: If number of input channels is not divisible by number of groups or less than number of groups.
        ValueError: Number of groups must be one when data_format is 'NDHWC'.
Returns:
        oneflow._oneflow_internal.BlobDesc: A 5D `Blob` with the shape of (batch_size, filters, new_depth, new_height, new_width).
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def conv3d_Job(x: tp.Numpy.Placeholder((1, 64, 16, 16, 16))
) -> tp.Numpy:
initializer = flow.truncated_normal(0.1)
conv3d = flow.layers.conv3d(
x,
filters=128,
kernel_size=3,
strides=1,
padding='SAME',
kernel_initializer=initializer,
name="Conv3d"
)
return conv3d
x = np.random.randn(1, 64, 16, 16, 16).astype(np.float32)
out = conv3d_Job(x)
# out.shape (1, 128, 16, 16, 16)
"""
need_transpose = 0
if data_format.upper() == "NDHWC":
need_transpose = 1
data_format = "NCDHW"
if need_transpose:
inputs = flow.transpose(inputs, perm=[0, 4, 1, 2, 3])
if isinstance(padding, (list, tuple)):
padding = list(padding)
(padding[1], padding[4]) = (padding[4], padding[1])
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size, kernel_size)
else:
assert isinstance(kernel_size, (list, tuple))
assert len(kernel_size) == 3
kernel_size = tuple(kernel_size)
assert isinstance(groups, int)
assert groups > 0
assert groups <= filters
assert filters % groups == 0
if data_format.upper() == "NCDHW":
assert groups <= inputs.shape[1]
assert inputs.shape[1] % groups == 0
weight_shape = (filters, inputs.shape[1] // groups) + kernel_size
elif data_format.upper() == "NDHWC":
assert groups == 1
        assert groups <= inputs.shape[4]
        assert inputs.shape[4] % groups == 0
weight_shape = (
filters,
kernel_size[0],
kernel_size[1],
kernel_size[2],
inputs.shape[4] // groups,
)
else:
raise ValueError("data_format must be in NCHW or NHWC")
if kernel_initializer is None:
kernel_initializer = flow.xavier_uniform_initializer(data_format=data_format)
if weight_name is None:
with flow.scope.namespace(name):
weight = flow.get_variable(
name="weight",
shape=weight_shape,
dtype=inputs.dtype,
initializer=kernel_initializer,
regularizer=kernel_regularizer,
trainable=trainable,
model_name="weight",
reuse=False,
)
else:
weight = flow.get_variable(
name=weight_name,
shape=weight_shape,
dtype=inputs.dtype,
initializer=kernel_initializer,
regularizer=kernel_regularizer,
trainable=trainable,
model_name="weight",
reuse=False,
)
output = flow.nn.conv3d(
inputs,
weight,
strides,
padding,
data_format,
dilation_rate,
groups=groups,
name=name,
)
if use_bias:
if bias_initializer is None:
bias_initializer = flow.constant_initializer(0)
if bias_name is None:
with flow.scope.namespace(name):
bias = flow.get_variable(
name="bias",
shape=(filters,),
dtype=inputs.dtype,
initializer=bias_initializer,
regularizer=bias_regularizer,
trainable=trainable,
model_name="bias",
reuse=False,
)
else:
bias = flow.get_variable(
name=bias_name,
shape=(filters,),
dtype=inputs.dtype,
initializer=bias_initializer,
regularizer=bias_regularizer,
trainable=trainable,
model_name="bias",
reuse=False,
)
with flow.scope.namespace(name):
output = flow.nn.bias_add(output, bias, data_format, name="bias_add")
if callable(activation):
with flow.scope.namespace(name):
output = activation(output, name="activation")
if need_transpose:
output = flow.transpose(output, perm=[0, 2, 3, 4, 1])
return output
def layer_norm(
inputs: oneflow._oneflow_internal.BlobDesc,
center: bool = True,
scale: bool = True,
trainable: bool = True,
begin_norm_axis: int = 1,
begin_params_axis: int = -1,
epsilon: float = 1e-05,
name: str = "LayerNorm",
) -> oneflow._oneflow_internal.BlobDesc:
"""Layer Normalization.
Args:
inputs (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
center (bool, optional): A boolean specifies whether to shift input `Blob`. Defaults to True.
scale (bool, optional): A boolean specifies whether to scale input `Blob`. Defaults to True.
trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
begin_norm_axis (int, optional): An integer specifies which axis to normalize at first. Defaults to 1.
        begin_params_axis (int, optional): An integer specifies the axis at which the scale and shift parameters (gamma and beta) begin. Defaults to -1.
epsilon (float, optional): A small float is added to avoid division by zero. Defaults to 1e-5.
name (Optional[str], optional): This layer's name. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: A normalized `Blob` with same shape of input.
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def layer_norm_Job(x: tp.Numpy.Placeholder((1, 64, 128, 128))
) -> tp.Numpy:
layer_norm = flow.layers.layer_norm(
x,
name="LayerNorm1"
)
return layer_norm
x = np.random.randn(1, 64, 128, 128).astype(np.float32)
out = layer_norm_Job(x)
# out.shape (1, 64, 128, 128)
"""
if center is False and scale is False:
trainable = False
beta = None
gamma = None
param_shape = inputs.shape[begin_params_axis:]
if center:
with flow.scope.namespace(name):
beta = flow.get_variable(
name="beta",
shape=param_shape,
dtype=inputs.dtype,
initializer=flow.constant_initializer(0.0),
trainable=trainable,
model_name="beta",
distribute=oneflow._oneflow_internal.distribute.broadcast(),
reuse=False,
)
if scale:
with flow.scope.namespace(name):
gamma = flow.get_variable(
name="gamma",
shape=param_shape,
dtype=inputs.dtype,
initializer=flow.constant_initializer(1.0),
trainable=trainable,
model_name="gamma",
distribute=oneflow._oneflow_internal.distribute.broadcast(),
reuse=False,
)
if flow.current_scope().device_parallel_desc_symbol.device_tag == "cpu":
if begin_norm_axis < 0:
begin_norm_axis = begin_norm_axis + len(inputs.shape)
reduce_axis = []
for dim in range(len(inputs.shape)):
if dim >= begin_norm_axis:
reduce_axis.append(dim)
(mean, variance) = flow.nn.moments(inputs, reduce_axis, keepdims=True)
axis = begin_norm_axis
normalized = flow.nn.batch_normalization(
x=inputs,
mean=mean,
variance=variance,
variance_epsilon=epsilon,
axis=axis,
name=name,
)
nd_params_shape = [1] * (len(inputs.shape) - len(param_shape)) + list(
param_shape
)
affined = normalized
if gamma:
gamma = flow.reshape(gamma, nd_params_shape)
affined *= gamma
if beta:
beta = flow.reshape(beta, nd_params_shape)
affined += beta
return affined
elif flow.current_scope().device_parallel_desc_symbol.device_tag == "gpu":
op_builder = (
flow.user_op_builder(name)
.Op("layer_norm")
.Input("x", [inputs])
.Output("y")
.Output("mean")
.Output("inv_variance")
)
if beta is not None:
op_builder.Input("beta", [beta])
if gamma is not None:
op_builder.Input("gamma", [gamma])
op_builder.Output("normalized")
op_builder.Attr("center", center)
op_builder.Attr("scale", scale)
op_builder.Attr("begin_norm_axis", begin_norm_axis)
op_builder.Attr("begin_params_axis", begin_params_axis)
op_builder.Attr("epsilon", epsilon)
return op_builder.Build().InferAndTryRun().RemoteBlobList()[0]
else:
raise NotImplementedError
def layer_norm_grad(
dy: oneflow._oneflow_internal.BlobDesc,
x: oneflow._oneflow_internal.BlobDesc,
mean: oneflow._oneflow_internal.BlobDesc,
inv_variance: oneflow._oneflow_internal.BlobDesc,
begin_norm_axis: int = 1,
name: str = "LayerNormGrad",
) -> oneflow._oneflow_internal.BlobDesc:
"""Layer normalization
Args:
        dy (oneflow._oneflow_internal.BlobDesc): Upstream derivatives.
        x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
        mean (oneflow._oneflow_internal.BlobDesc): Mean over neurons.
        inv_variance (oneflow._oneflow_internal.BlobDesc): Inverse variance over neurons.
begin_norm_axis (int, optional): An integer specifies which axis to normalize at first. Defaults to 1.
name (Optional[str], optional): This layer's name. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: Gradient with respect to input `Blob`.
"""
op = (
flow.user_op_builder(name)
.Op("layer_norm_grad")
.Input("dy", [dy])
.Input("x", [x])
.Input("mean", [mean])
.Input("inv_variance", [inv_variance])
.Output("dx")
.Attr("begin_norm_axis", begin_norm_axis)
.Attr("epsilon", 1e-05)
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
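# A minimal usage sketch for layer_norm_grad (illustrative only; the blob
# names below are hypothetical and not part of the original file). It assumes
# `x`, `mean` and `inv_variance` come from the GPU "layer_norm" op above and
# that `dy` is the upstream gradient with the same shape as `x`:
#
#     dx = layer_norm_grad(dy, x, mean, inv_variance, begin_norm_axis=1,
#                          name="my_layer_norm_grad")
#
# `dx` is the gradient propagated back to the layer input.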
def layer_norm_param_grad(
dy: oneflow._oneflow_internal.BlobDesc,
norm: oneflow._oneflow_internal.BlobDesc,
gamma: oneflow._oneflow_internal.BlobDesc,
begin_params_axis: int = -1,
name: str = "LayerNormParamGrad",
) -> Tuple[
oneflow._oneflow_internal.BlobDesc,
oneflow._oneflow_internal.BlobDesc,
oneflow._oneflow_internal.BlobDesc,
]:
"""Backward pass for layer normalization
Args:
        dy (oneflow._oneflow_internal.BlobDesc): Upstream derivatives.
        norm (oneflow._oneflow_internal.BlobDesc): Normalized output.
        gamma (oneflow._oneflow_internal.BlobDesc): Scale parameter.
        begin_params_axis (int, optional): The axis at which the affine parameters (gamma and beta) begin. Defaults to -1.
name (Optional[str], optional): This layer's name. Defaults to 'LayerNormParamGrad'.
Returns:
Tuple[oneflow._oneflow_internal.BlobDesc]:
normalized_diff: Gradient with respect to input `Blob`.
beta_diff: Gradient with respect to shift parameter beta.
gamma_diff: Gradient with respect to scale parameter gamma.
"""
op = (
flow.user_op_builder(name)
.Op("layer_norm_param_grad")
.Input("dy", [dy])
.Input("normalized", [norm])
.Input("gamma", [gamma])
.Output("normalized_diff")
.Output("beta_diff")
.Output("gamma_diff")
.Output("reduce_buf")
.Attr("begin_params_axis", begin_params_axis)
.Build()
)
(
normalized_diff,
beta_diff,
gamma_diff,
reduce_buf,
) = op.InferAndTryRun().RemoteBlobList()
return (normalized_diff, beta_diff, gamma_diff)
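# A minimal usage sketch (hypothetical names, not from the original file):
# given the upstream gradient `dy`, the normalized activations `norm` and the
# scale blob `gamma` of a preceding layer_norm, this returns the gradient
# w.r.t. the normalized input plus both parameter gradients:
#
#     normalized_diff, beta_diff, gamma_diff = layer_norm_param_grad(
#         dy, norm, gamma, begin_params_axis=-1, name="my_ln_param_grad")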
def _get_batch_normalization_variables(
name,
gamma_name,
beta_name,
moving_mean_name,
moving_variance_name,
center,
scale,
params_shape,
params_dtype,
trainable,
beta_initializer,
beta_regularizer,
gamma_initializer,
gamma_regularizer,
moving_mean_initializer,
moving_variance_initializer,
):
def get_beta_var(name):
if center:
beta = flow.get_variable(
name=name,
shape=params_shape,
dtype=params_dtype,
initializer=beta_initializer or flow.zeros_initializer(),
regularizer=beta_regularizer,
trainable=trainable,
distribute=oneflow._oneflow_internal.distribute.broadcast(),
reuse=False,
)
else:
beta = flow.constant(0, dtype=params_dtype, shape=params_shape, name=name)
return beta
if beta_name is None:
with flow.scope.namespace(name):
beta = get_beta_var("beta")
else:
beta = get_beta_var(beta_name)
def get_gamma_var(name):
if scale:
gamma = flow.get_variable(
name=name,
shape=params_shape,
dtype=params_dtype,
initializer=gamma_initializer or flow.ones_initializer(),
regularizer=gamma_regularizer,
trainable=trainable,
distribute=oneflow._oneflow_internal.distribute.broadcast(),
reuse=False,
)
else:
gamma = flow.constant(1, dtype=params_dtype, shape=params_shape, name=name)
return gamma
if gamma_name is None:
with flow.scope.namespace(name):
gamma = get_gamma_var("gamma")
else:
gamma = get_gamma_var(gamma_name)
def get_moving_mean_var(name):
moving_mean = flow.get_variable(
name=name,
shape=params_shape,
dtype=params_dtype,
initializer=moving_mean_initializer or flow.zeros_initializer(),
trainable=False,
distribute=oneflow._oneflow_internal.distribute.broadcast(),
reuse=False,
)
return moving_mean
if moving_mean_name is None:
with flow.scope.namespace(name):
moving_mean = get_moving_mean_var("moving_mean")
else:
moving_mean = get_moving_mean_var(moving_mean_name)
def get_moving_variance_var(name):
moving_variance = flow.get_variable(
name=name,
shape=params_shape,
dtype=params_dtype,
initializer=moving_variance_initializer or flow.ones_initializer(),
trainable=False,
distribute=oneflow._oneflow_internal.distribute.broadcast(),
reuse=False,
)
return moving_variance
if moving_variance_name is None:
with flow.scope.namespace(name):
moving_variance = get_moving_variance_var("moving_variance")
else:
moving_variance = get_moving_variance_var(moving_variance_name)
return (beta, gamma, moving_mean, moving_variance)
def batch_normalization(
inputs: oneflow._oneflow_internal.BlobDesc,
axis: int = -1,
momentum: float = 0.99,
epsilon: float = 0.001,
center: bool = True,
scale: bool = True,
beta_initializer: Optional[initializer_conf_util.InitializerConf] = None,
gamma_initializer: Optional[initializer_conf_util.InitializerConf] = None,
beta_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
gamma_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
moving_mean_initializer: Optional[initializer_conf_util.InitializerConf] = None,
moving_variance_initializer: Optional[initializer_conf_util.InitializerConf] = None,
trainable: bool = True,
training: bool = True,
name: str = "BatchNorm",
gamma_name: Optional[str] = None,
beta_name: Optional[str] = None,
moving_mean_name: Optional[str] = None,
moving_variance_name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
"""The BatchNormalization Layer.
This layer can be used in conv or dense layer.
    The input data will be normalized by the mean and variance of the current batch data.
Args:
inputs (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
        axis (int, optional): An int specifies the axis that should be normalized. Default is -1, which normalizes the last axis.
momentum (float, optional): A float specifies the momentum for the moving average. Defaults to 0.99.
epsilon (float, optional): A small float added to avoid division by zero. Defaults to 0.001.
center (bool, optional): A boolean specifies whether to add offset to normalized `Blob`. Defaults to True.
scale (bool, optional): A boolean specifies whether to multiply normalized `Blob` by gamma. Defaults to True.
beta_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for beta. Defaults to None.
gamma_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for gamma. Defaults to None.
beta_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for beta. Defaults to None.
gamma_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for gamma. Defaults to None.
moving_mean_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for moving mean. Defaults to None.
moving_variance_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for moving variance. Defaults to None.
trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
training (bool, optional): A boolean specifies whether now is training the model. Defaults to True.
name (Optional[str], optional): This layer's name. Defaults to None.
gamma_name (Optional[str], optional): This gamma's name. Defaults to None.
beta_name (Optional[str], optional): This beta's name. Defaults to None.
moving_mean_name (Optional[str], optional): This moving_mean's name. Defaults to None.
moving_variance_name (Optional[str], optional): This moving_var's name. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` with same shape of input.
Raises:
ValueError: If axis is out of dimension of input.
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def batch_norm_Job(x: tp.Numpy.Placeholder((1, 64, 128, 128))
) -> tp.Numpy:
initializer = flow.truncated_normal(0.1)
conv2d = flow.layers.conv2d(
x,
filters=128,
kernel_size=3,
strides=2,
padding='SAME',
kernel_initializer=initializer,
name="Conv2d"
)
batch_norm = flow.layers.batch_normalization(
conv2d,
axis=1
)
return batch_norm
x = np.random.randn(1, 64, 128, 128).astype(np.float32)
out = batch_norm_Job(x)
# out.shape (1, 128, 64, 64)
"""
if axis < 0:
axis += len(inputs.shape)
assert axis >= 0 and axis < len(inputs.shape)
params_shape = [inputs.shape[axis]]
params_dtype = flow.float32 if inputs.dtype == flow.float16 else inputs.dtype
if not flow.current_global_function_desc().IsTrainable() or not trainable:
training = False
(beta, gamma, moving_mean, moving_variance) = _get_batch_normalization_variables(
name,
gamma_name,
beta_name,
moving_mean_name,
moving_variance_name,
center,
scale,
params_shape,
params_dtype,
trainable,
beta_initializer,
beta_regularizer,
gamma_initializer,
gamma_regularizer,
moving_mean_initializer,
moving_variance_initializer,
)
if flow.current_scope().device_parallel_desc_symbol.device_tag == "cpu":
if training:
reduce_axis = []
for dim in range(len(inputs.shape)):
if dim != axis:
reduce_axis.append(dim)
(mean, variance) = flow.nn.moments(inputs, reduce_axis, keepdims=False)
def update_moving(moving, this_batch):
moving_identity = flow.identity(moving)
flow.assign(
moving, momentum * moving_identity + (1 - momentum) * this_batch
)
update_moving(moving_mean, mean)
update_moving(moving_variance, variance)
return flow.nn.batch_normalization(
x=inputs,
mean=mean,
variance=variance,
offset=beta,
scale=gamma,
variance_epsilon=epsilon,
axis=axis,
name=name,
)
else:
mean = moving_mean
variance = moving_variance
return flow.nn.batch_normalization(
x=inputs,
mean=mean,
variance=variance,
offset=beta,
scale=gamma,
variance_epsilon=epsilon,
axis=axis,
name=name,
)
else:
builder = (
flow.user_op_builder(name)
.Op("normalization")
.Input("x", [inputs])
.Input("moving_mean", [moving_mean])
.Input("moving_variance", [moving_variance])
.Input("gamma", [gamma])
.Input("beta", [beta])
.Output("y")
.Attr("axis", axis)
.Attr("epsilon", epsilon)
.Attr("training", training)
.Attr("momentum", momentum)
)
if trainable and training:
builder = builder.Output("mean").Output("inv_variance")
return builder.Build().InferAndTryRun().RemoteBlobList()[0]
def batch_normalization_add_relu(
inputs: oneflow._oneflow_internal.BlobDesc,
addend: Optional[oneflow._oneflow_internal.BlobDesc] = None,
axis: int = -1,
momentum: float = 0.99,
epsilon: float = 0.001,
center: bool = True,
scale: bool = True,
beta_initializer: Optional[initializer_conf_util.InitializerConf] = None,
gamma_initializer: Optional[initializer_conf_util.InitializerConf] = None,
beta_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
gamma_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
moving_mean_initializer: Optional[initializer_conf_util.InitializerConf] = None,
moving_variance_initializer: Optional[initializer_conf_util.InitializerConf] = None,
trainable: bool = True,
training: bool = True,
name: str = "BatchNorm",
gamma_name: Optional[str] = None,
beta_name: Optional[str] = None,
moving_mean_name: Optional[str] = None,
moving_variance_name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
"""Fused flow.layers.batch_normalization + flow.math.add + flow.math.relu
Args:
inputs (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
        addend (oneflow._oneflow_internal.BlobDesc): `Blob` added to the batch_normalization output.
        axis (int, optional): An int specifies the axis that should be normalized. Default is -1, which normalizes the last axis.
momentum (float, optional): A float specifies the momentum for the moving average. Defaults to 0.99.
epsilon (float, optional): A small float added to avoid division by zero. Defaults to 0.001.
center (bool, optional): A boolean specifies whether to add offset to normalized `Blob`. Defaults to True.
scale (bool, optional): A boolean specifies whether to multiply normalized `Blob` by gamma. Defaults to True.
beta_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for beta. Defaults to None.
gamma_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for gamma. Defaults to None.
beta_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for beta. Defaults to None.
gamma_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for gamma. Defaults to None.
moving_mean_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for moving mean. Defaults to None.
moving_variance_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for moving variance. Defaults to None.
trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
training (bool, optional): A boolean specifies whether now is training the model. Defaults to True.
name (Optional[str], optional): This layer's name. Defaults to None.
gamma_name (Optional[str], optional): This gamma's name. Defaults to None.
beta_name (Optional[str], optional): This beta's name. Defaults to None.
moving_mean_name (Optional[str], optional): This moving_mean's name. Defaults to None.
moving_variance_name (Optional[str], optional): This moving_var's name. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` with same shape of input.
Raises:
ValueError: If axis is out of dimension of input.
"""
if not flow.current_global_function_desc().IsTrainable() or not trainable:
training = False
if (
not training
or flow.current_scope().device_parallel_desc_symbol.device_tag == "cpu"
):
out = flow.layers.batch_normalization(
inputs,
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
trainable=trainable,
training=training,
name=name,
)
with flow.scope.namespace("BatchNormAddRelu"):
if addend is not None:
out = out + addend
return flow.math.relu(out)
if axis < 0:
axis += len(inputs.shape)
assert 0 <= axis < len(inputs.shape)
params_shape = [inputs.shape[axis]]
params_dtype = flow.float32 if inputs.dtype == flow.float16 else inputs.dtype
(beta, gamma, moving_mean, moving_variance) = _get_batch_normalization_variables(
name,
gamma_name,
beta_name,
moving_mean_name,
moving_variance_name,
center,
scale,
params_shape,
params_dtype,
trainable,
beta_initializer,
beta_regularizer,
gamma_initializer,
gamma_regularizer,
moving_mean_initializer,
moving_variance_initializer,
)
builder = (
flow.user_op_builder(name)
.Op("normalization_add_relu")
.Input("x", [inputs])
.Input("moving_mean", [moving_mean])
.Input("moving_variance", [moving_variance])
.Input("gamma", [gamma])
.Input("beta", [beta])
.Output("y")
.Output("mean")
.Output("inv_variance")
.Output("reserve_space")
.Attr("axis", axis)
.Attr("epsilon", epsilon)
.Attr("momentum", momentum)
)
if addend is not None:
builder = builder.Input("addend", [addend])
return builder.Build().InferAndTryRun().RemoteBlobList()[0]
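# A minimal sketch of a residual-block tail using the fused op (the blob names
# and shapes are illustrative assumptions, not part of the original file):
#
#     # `conv_out` and `shortcut` are NCHW blobs with matching shapes
#     out = batch_normalization_add_relu(
#         conv_out, addend=shortcut, axis=1, training=True, name="bn_add_relu1")
#
# On CPU, or when not training, the code above falls back to the unfused
# batch_normalization followed by an explicit add and relu.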
def batch_normalization_relu(
inputs: oneflow._oneflow_internal.BlobDesc,
axis: int = -1,
momentum: float = 0.99,
epsilon: float = 0.001,
center: bool = True,
scale: bool = True,
beta_initializer: Optional[initializer_conf_util.InitializerConf] = None,
gamma_initializer: Optional[initializer_conf_util.InitializerConf] = None,
beta_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
gamma_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
moving_mean_initializer: Optional[initializer_conf_util.InitializerConf] = None,
moving_variance_initializer: Optional[initializer_conf_util.InitializerConf] = None,
trainable: bool = True,
training: bool = True,
name: str = "BatchNorm",
) -> oneflow._oneflow_internal.BlobDesc:
"""Fused flow.layers.batch_normalization + flow.math.relu
Args:
inputs (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
        axis (int, optional): An int specifies the axis that should be normalized. Default is -1, which normalizes the last axis.
momentum (float, optional): A float specifies the momentum for the moving average. Defaults to 0.99.
epsilon (float, optional): A small float added to avoid division by zero. Defaults to 0.001.
center (bool, optional): A boolean specifies whether to add offset to normalized `Blob`. Defaults to True.
scale (bool, optional): A boolean specifies whether to multiply normalized `Blob` by gamma. Defaults to True.
beta_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for beta. Defaults to None.
gamma_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for gamma. Defaults to None.
beta_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for beta. Defaults to None.
gamma_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for gamma. Defaults to None.
moving_mean_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for moving mean. Defaults to None.
moving_variance_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for moving variance. Defaults to None.
trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
training (bool, optional): A boolean specifies whether now is training the model. Defaults to True.
name (Optional[str], optional): This layer's name. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` with same shape of input.
Raises:
ValueError: If axis is out of dimension of input.
"""
return flow.layers.batch_normalization_add_relu(
inputs,
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
trainable=trainable,
training=training,
name=name,
)
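# A minimal sketch (illustrative only): batch_normalization_relu is the fused
# add-relu variant with no addend, so
#
#     out = batch_normalization_relu(conv_out, axis=1, name="bn_relu1")
#
# behaves like batch_normalization_add_relu(conv_out, addend=None, axis=1,
# name="bn_relu1").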
def upsample(
x: oneflow._oneflow_internal.BlobDesc,
size: Sequence[int] = (2, 2),
align_corners: bool = False,
data_format: str = "NCHW",
interpolation: str = "nearest",
name: str = "Upsample2D",
):
"""The Upsample Layer, this layer can upsample the feature map to a specified scale.
Args:
        x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
        size (tuple, optional): (height_scale, width_scale). Defaults to (2, 2).
        align_corners (bool, optional): Defaults to False.
        data_format (str, optional): A string specifies the format of the input `Blob`, one of "NCHW" or "NHWC" (default: "NCHW"). "NCHW" corresponds to channels_first, i.e. the input `Blob` with shape (batch_size, channels, height, width).
            "NHWC" corresponds to channels_last, i.e. the input `Blob` with shape (batch_size, height, width, channels). Defaults to "NCHW".
        interpolation (str, optional): Image interpolation algorithm to enlarge the image size. "nearest" and "bilinear" are available now. Defaults to "nearest".
        name (str, optional): This layer's name. Defaults to None.
Raises:
ValueError: interpolation must be "nearest" or "bilinear".
ValueError: data_format must be "NHWC" or "NCHW"
Returns:
        oneflow._oneflow_internal.BlobDesc: A `Blob` which is the upsampled `x`. If `size` is (2, 2), the shape of the return value is [N, C, 2H, 2W].
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def upsample_Job(x: tp.Numpy.Placeholder((1, 32, 32, 32))
) -> tp.Numpy:
upsample = flow.layers.upsample_2d(
x,
size=(2, 2),
name="Upsample1"
)
return upsample
x = np.random.randn(1, 32, 32, 32).astype(np.float32)
out = upsample_Job(x)
# out.shape (1, 32, 64, 64)
"""
if isinstance(size, int):
height_scale = size
width_scale = size
else:
assert isinstance(size, (list, tuple))
assert len(size) == 2
height_scale = size[0]
width_scale = size[1]
if interpolation != "nearest" and interpolation != "bilinear":
raise ValueError('interpolation must be "nearest" or "bilinear".')
if interpolation == "nearest" and align_corners:
raise ValueError('interpolation "nearest" does not support align_corners.')
if data_format.upper() != "NCHW" and data_format.upper() != "NHWC":
raise ValueError('data_format must be "NHWC" or "NCHW".')
need_transpose = 0
if data_format.upper() == "NHWC":
need_transpose = 1
if need_transpose:
x = flow.transpose(x, perm=[0, 3, 1, 2])
op = (
flow.user_op_builder(name)
.Op("upsample")
.Input("x", [x])
.Output("y")
.Attr("height_scale", float(height_scale))
.Attr("width_scale", float(width_scale))
.Attr("align_corners", align_corners)
.Attr("data_format", "channels_first")
.Attr("interpolation", interpolation)
.Build()
)
output = op.InferAndTryRun().SoleOutputBlob()
if need_transpose:
output = flow.transpose(output, perm=[0, 2, 3, 1])
return output
| [
"[email protected]"
]
| |
4887675c21970c73fbb8d10f2891370c490380cb | 387587c753e76d98a6a0401327766c45561d5109 | /ros_catkin_ws/devel_isolated/roslaunch/lib/python2.7/dist-packages/roslaunch/__init__.py | 8752f22becaf4ebc75be508c7fbdbba3736db545 | [
"MIT"
]
| permissive | letrend/neopixel_fpga | 7a4819a566fab02bd602c3338b8aaa0ddf4bee85 | d9247417a9d311eceebad5898571846c6e33a44a | refs/heads/master | 2021-01-23T01:00:55.290431 | 2017-05-30T20:15:38 | 2017-05-30T20:15:38 | 92,855,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | # -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/root/ros_catkin_ws/src/ros_comm/roslaunch/src".split(";")
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
| [
"[email protected]"
]
| |
ad0b0295d9ea1ffea7f3b2b7763f265e7b92aa1a | 0f923a810256cfccac6c0642383638accaa8c83d | /terminal.py | aa7f6ccc6415275cf20270726ab7f148281295b2 | [
"MIT"
]
| permissive | overvenus/terminal-tools | 7aca04bb4aca7122e821216ba53727d760f6bdb3 | 2db36334f9df068a30b42adfe270117f7ac5ae0d | refs/heads/master | 2021-01-12T12:09:50.676607 | 2015-12-16T12:22:45 | 2015-12-16T12:22:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,883 | py | #!/usr/bin/env python
import json
import urllib
import urllib2
# Authentication
def setup_credentials(utoken, atoken, credsfile):
if utoken is None and atoken is None:
try:
creds = json.load(open(credsfile, 'r'))
utoken = creds['user_token']
atoken = creds['access_token']
except:
print "Can't open credentials file. \n ", \
"You must provide a user token and a access token at least one time, or a valid credentials file"
exit(127)
elif (utoken is not None and atoken is None) or (utoken is None and atoken is not None):
print "--utoken AND --atoken parameters must be passed together"
exit(1)
else:
with open(credsfile, 'w') as cfile:
json.dump({'user_token': utoken, 'access_token': atoken}, cfile)
global user_token
global access_token
user_token, access_token = str(utoken), str(atoken)
return user_token, access_token
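# A minimal usage sketch (the tokens and file name below are placeholders, not
# real credentials): the first call stores the tokens in the credentials file,
# later calls can pass None for both tokens to reuse the saved file.
#
#     setup_credentials('MY_USER_TOKEN', 'MY_ACCESS_TOKEN', 'creds.json')
#     setup_credentials(None, None, 'creds.json')  # reuse saved credentials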
# Manage Request
def make_request(call, params=None, url=None, headers=None, raw=False):
try:
if url is None:
url = 'https://api.terminal.com/v0.2/%s' % call
if headers is None:
headers={'user-token': user_token,'access-token':access_token, 'Content-Type':'application/json'}
if params is None:
data = json.dumps({})
else:
parsed_params={}
for key in params.keys():
if params[key] is not None:
parsed_params.update({key:params[key]})
if raw:
data = urllib.urlencode(parsed_params)
headers.pop('Content-Type')
else:
data = json.dumps(parsed_params)
req = urllib2.Request(url, data, headers)
response = urllib2.urlopen(req)
results = json.loads(response.read())
results.update({u'success':True})
map(str,results)
return results
except urllib2.HTTPError as e:
return json.loads(e.read())
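# A minimal sketch of calling the API helper directly (assumes
# setup_credentials() has already set the module-level token globals; the
# exact keys in the response depend on the remote API):
#
#     result = make_request('list_terminals')
#     if result.get('success'):
#         print result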
# Browse Snapshots and Users
def get_snapshot(snapshot_id):
call = 'get_snapshot'
params = {"snapshot_id":snapshot_id}
response = make_request(call, params)
return response
def get_profile(username):
call = 'get_profile'
params = {"username":username}
response = make_request(call, params)
return response
def list_public_snapshots(username, tag=None, featured=None, title=None, page=0, perPage=1000 ,sortby='popularity'):
call = 'list_public_snapshots'
params = {'username':username, 'tag':tag, 'featured':featured, 'title':title,\
'page':page, 'perPage':perPage, 'sortby':sortby}
response = make_request(call, params)
return response
def count_public_snapshots(username=None, tag=None, featured=None, title=None):
call = 'count_public_snapshots'
params = {'username':username, 'tag':tag, 'featured':featured, 'title':title}
response = make_request(call, params)
return response
# Create and Manage Terminals
def list_terminals():
call = 'list_terminals'
params = None
response = make_request(call, params)
return response
def get_terminal(container_key=None, subdomain=None):
if (container_key is None and subdomain is None):
return {'error':'container_key OR subdomain must be passed'}
call = 'get_terminal'
params = {'container_key':container_key, 'subdomain':subdomain}
response = make_request(call, params)
return response
def start_snapshot(snapshot_id, instance_type=None, temporary=None, name=None, autopause=None, startup_script=None):
call = 'start_snapshot'
params = {'snapshot_id': snapshot_id, 'instance_type': instance_type, 'temporary': temporary, 'name': name,
'autopause': autopause, 'startup_script': startup_script}
response = make_request(call, params)
return response
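# A minimal sketch of starting a snapshot and checking progress (the snapshot
# id is a placeholder; it assumes the API returns a 'request_id' field, which
# can then be passed to request_progress() defined near the end of this file):
#
#     resp = start_snapshot('SOME_SNAPSHOT_ID', instance_type='micro',
#                           name='my-terminal')
#     if resp.get('success'):
#         print request_progress(resp.get('request_id'))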
def delete_terminal(container_key):
call = 'delete_terminal'
params = {'container_key':container_key}
response = make_request(call, params)
return response
def restart_terminal(container_key):
call = 'restart_terminal'
params = {'container_key':container_key}
response = make_request(call, params)
return response
def pause_terminal(container_key):
call = 'pause_terminal'
params = {'container_key':container_key}
response = make_request(call, params)
return response
def resume_terminal(container_key):
call = 'resume_terminal'
params = {'container_key':container_key}
response = make_request(call, params)
return response
def edit_terminal(container_key, instance_type=None, diskspace=None, name=None, custom_data=None):
call = 'edit_terminal'
params = {'container_key':container_key, 'instance_type':instance_type, 'diskspace':diskspace, \
'name':name, 'custom_data':custom_data}
response = make_request(call, params)
return response
# Create and Manage Snapshots
def list_snapshots(tag=None, featured=None, title=None, page=0, perPage=1000 ,sortby='popularity'):
call = 'list_snapshots'
params = {'tag':tag, 'featured':featured, 'title':title,'page':page, 'perPage':perPage, 'sortby':sortby}
response = make_request(call, params)
return response
def count_snapshots(tag=None, featured=None, title=None):
call = 'count_snapshots'
params = {'tag':tag, 'featured':featured, 'title':title}
response = make_request(call, params)
return response
def delete_snapshot(snapshot_id):
call = 'delete_snapshot'
params = {'snapshot_id':snapshot_id}
response = make_request(call, params)
return response
def edit_snapshot(snapshot_id, body=None, title=None, readme=None, tags=None):
call = 'edit_snapshot'
params = {'snapshot_id':snapshot_id, 'body':body, 'title':title, 'readme':readme, 'tags':tags}
response = make_request(call, params)
return response
def snapshot_terminal(container_key, body=None, title=None, readme=None, tags=None, public=None):
call = 'snapshot_terminal'
params = {'container_key':container_key, 'body':body, 'title':title, 'readme':readme, \
'tags':tags, 'public':public}
response = make_request(call, params)
return response
# Manage Terminal Access
def add_terminal_links(container_key, links):
call = 'add_terminal_links'
params= {'container_key':container_key, 'links':links}
response = make_request(call, params)
return response
def remove_terminal_links(container_key, links):
call = 'remove_terminal_links'
params= {'container_key':container_key, 'links':links}
response = make_request(call, params)
return response
def list_terminal_access(container_key):
call = 'list_terminal_access'
params = {'container_key':container_key}
response = make_request(call, params)
return response
def edit_terminal_access(container_key, is_public_list, access_rules):
call = 'edit_terminal_access'
params = {'container_key':container_key, 'is_public_list':is_public_list, 'access_rules':access_rules}
response = make_request(call, params)
return response
# Manage Terminal DNS & Domains
def get_cname_records():
call = 'get_cname_records'
params = {}
response = make_request(call, params)
return response
def add_domain_to_pool(domain):
call = 'add_domain_to_pool'
params = {'domain':domain}
response = make_request(call, params)
return response
def remove_domain_from_pool(domain):
call = 'remove_domain_from_pool'
params = {'domain':domain}
response = make_request(call, params)
return response
def add_cname_record(domain, subdomain, port):
    call = 'add_cname_record'
params = {'domain':domain, 'subdomain':subdomain, 'port':port}
response = make_request(call, params)
return response
def remove_cname_record(domain):
call = 'remove_cname_record'
params = {'domain':domain}
response = make_request(call, params)
return response
# Manage Terminal Idle Settings
def set_terminal_idle_settings(container_key, triggers=None, action=None):
call = 'set_terminal_idle_settings'
    params = {'container_key':container_key, 'triggers':triggers, 'action':action}
response = make_request(call, params)
return response
def get_terminal_idle_setting(container_key):
call = 'get_terminal_idle_setting'
params = {'container_key':container_key}
response = make_request(call, params)
return response
# Manage Usage and Credits
def instance_types():
call = 'instance_types'
params = None
response = make_request(call, params)
return response
def instance_price(instance_type):
call = 'instance_price'
params = {'instance_type':instance_type}
response = make_request(call, params)
return response
def balance():
call = 'balance'
params = None
response = make_request(call, params)
return response
def balance_added():
call = 'balance_added'
params = None
response = make_request(call, params)
return response
def gift(email, cents):
call = 'gift'
params = {'email':email, 'cents':cents}
response = make_request(call, params)
return response
def burn_history():
call = 'burn_history'
params = None
response = make_request(call, params)
return response
def terminal_usage_history():
call = 'terminal_usage_history'
params = None
response = make_request(call, params)
return response
def burn_state():
call = 'burn_state'
params = None
response = make_request(call, params)
return response
def burn_estimates():
call = 'burn_estimates'
params = None
response = make_request(call, params)
return response
# Manage SSH Public Keys
def add_authorized_key_to_terminal(container_key, publicKey):
call = 'add_authorized_key_to_terminal'
params = {'container_key':container_key, 'publicKey':publicKey}
response = make_request(call, params)
return response
def add_authorized_key_to_ssh_proxy(name, publicKey):
call = 'add_authorized_key_to_ssh_proxy'
params = {'name':name, 'publicKey':publicKey}
try:
response = make_request(call, params)
except Exception, e:
return {'status':e}
return response
def del_authorized_key_from_ssh_proxy(name, fingerprint):
call = 'del_authorized_key_from_ssh_proxy'
params = {'name':name, 'fingerprint':fingerprint}
response = make_request(call, params)
return response
def get_authorized_keys_from_ssh_proxy():
call = 'get_authorized_keys_from_ssh_proxy'
params = None
response = make_request(call, params)
return response
# Other
def request_progress(request_id):
call = 'request_progress'
params = {'request_id':request_id}
response = make_request(call, params)
return response
def who_am_i():
call = 'who_am_i'
response = make_request(call)
return response | [
"[email protected]"
]
| |
a63d008f4a88eae9d409a21dec51ccc26b7b1055 | 0946fc233478fec9eac9eb247d45667c3b3989e8 | /reassignment.py | ab002343bf6d67cf8ea6059c7d08a196ae10471d | []
| no_license | hoinx/music-processing-experiments | 0da954e268d6a7120729cb3b012f53d836cd1dc7 | bdd76fa8a8e1b90c8d4e610dcd3a6beadc2e7b1d | refs/heads/master | 2020-04-30T13:00:35.283119 | 2015-11-02T19:54:46 | 2015-11-02T19:54:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,545 | py | import os
import matplotlib.pyplot as plt
import numpy as np
import scipy
from files import load_wav
from spectrogram import real_half, create_window
from analysis import split_to_blocks
from tuning import pitch_to_freq, freq_to_pitch, pitch_bin_range, quantize_freqs_to_pitch_bins
def cross_spectrum(spectrumA, spectrumB):
'''
Returns a cross-spectrum, ie. spectrum of cross-correlation of two signals.
This result does not depend on the order of the arguments.
Since we already have the spectra of signals A and B and and want the
spectrum of their cross-correlation, we can replace convolution in time
domain with multiplication in frequency domain.
'''
return spectrumA * spectrumB.conj()
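def _cross_spectrum_example():
    # Minimal illustration (not used by the pipeline): the inverse FFT of the
    # cross-spectrum of FFT(a) and FFT(b) is the circular cross-correlation of
    # a with b, checked here against a direct computation.
    a = np.array([1.0, 2.0, 3.0, 4.0])
    b = np.array([0.0, 1.0, 0.0, 0.0])
    via_spectrum = np.fft.ifft(cross_spectrum(np.fft.fft(a), np.fft.fft(b))).real
    direct = np.array([sum(a[(n + k) % len(a)] * b[k] for k in range(len(b)))
                       for n in range(len(a))])
    assert np.allclose(via_spectrum, direct)
    return via_spectrum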
def shift_right(values):
'''
Shifts the array to the right by one place, filling the empty values with
zeros.
TODO: use np.roll()
'''
return np.hstack([np.zeros((values.shape[0],1)), values[..., :-1]])
def shifted_amplitude_pair(amplitudes):
'''
Fakes looking at the previous frame shifted by one sample.
In order to work only with one frame of size N and not N + 1, we fill the
missing value with zero. This should not introduce a large error, since the
borders of the amplitude frame will go to zero anyway due to applying a
window function in the STFT tranform.
Returns: (previous amplitudes, current amplitudes)
'''
prevAmplitudes = shift_right(amplitudes)
return prevAmplitudes, amplitudes
def arg(crossSpectrum):
return np.mod(np.angle(crossSpectrum) / (2 * np.pi), 1.0)
def estimate_instant_freqs(crossTimeSpectrum):
'''
Channelized instantaneous frequency - the vector of simultaneous
instantaneous frequencies computed over a single frame of the digital
short-time Fourier transform.
Instantaneous frequency - derivative of phase by time.
cif = angle(crossSpectrumTime) * sampleRate / (2 * pi)
In this case the return value is normalized (not multiplied by sampleRate).
Basically it is phase normalized to the [0.0; 1.0] interval,
instead of absolute [0.0; sampleRate].
'''
return arg(crossTimeSpectrum)
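def _instant_freq_example():
    # Sketch: for a pure tone the channelized instantaneous frequency around the
    # spectral peak should come out close to f0 / fs. The sample rate, tone
    # frequency and frame length are arbitrary values chosen for illustration.
    fs, f0, n = 44100.0, 1000.0, 2048
    x = np.sin(2 * np.pi * f0 * np.arange(n) / fs).reshape(1, -1)
    w = create_window(n)
    X = np.fft.fft(x * w)
    X_prev_time = np.fft.fft(shift_right(x) * w)
    X_inst_freqs = estimate_instant_freqs(cross_spectrum(X, X_prev_time))
    peak_bin = np.argmax(abs(X[0, :n // 2]))
    # X_inst_freqs[0, peak_bin] * fs should be close to f0 (1000 Hz here)
    return X_inst_freqs[0, peak_bin] * fs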
def estimate_group_delays(crossFreqSpectrum):
return 0.5 - arg(crossFreqSpectrum)
def open_file(filename, block_size, hop_size):
song, fs = load_wav(filename)
x, times = split_to_blocks(song, block_size, hop_size=hop_size)
return x, times, fs
def compute_spectra(x, w):
X = np.fft.fft(x * w)
X_prev_time = np.fft.fft(shift_right(x) * w)
X_prev_freq = shift_right(X)
X_cross_time = cross_spectrum(X, X_prev_time)
X_cross_freq = cross_spectrum(X, X_prev_freq)
X_inst_freqs = estimate_instant_freqs(X_cross_time)
X_group_delays = estimate_group_delays(X_cross_freq)
return X, X_cross_time, X_cross_freq, X_inst_freqs, X_group_delays
def db_scale(magnitude_spectrum):
# min_amplitude = 1e-6
# threshold = -np.log10(min_amplitude)
# return ((threshold + np.log10(np.maximum(min_amplitude, magnitude_spectrum))) / threshold)
return 20 * np.log10(np.maximum(1e-6, magnitude_spectrum))
def requantize_f_spectrogram(X_cross, X_instfreqs, to_log=True):
'''Only requantize by frequency'''
X_reassigned = np.empty(X_cross.shape)
N = X_cross.shape[1]
magnitude_spectrum = abs(X_cross) / N
weights = magnitude_spectrum
for i in range(X_cross.shape[0]):
X_reassigned[i, :] = np.histogram(X_instfreqs[i], N, range=(0,1), weights=weights[i])[0]
X_reassigned = X_reassigned ** 2
if to_log:
X_reassigned = db_scale(X_reassigned)
return X_reassigned
def requantize_tf_spectrogram(X_group_delays, X_inst_freqs, times, block_size, fs, weights=None):
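    # Time-frequency reassignment: each STFT cell is moved to its estimated true
    # location (the group delay shifts it in time within the analysis block and
    # the channelized instantaneous frequency supplies the frequency coordinate),
    # and the cell weights are then accumulated into a 2-D histogram over those
    # reassigned coordinates.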
block_duration = block_size / fs
block_center_time = block_duration / 2
X_time = np.tile(times + block_center_time, (X_group_delays.shape[1], 1)).T \
+ X_group_delays * block_duration
time_range = (times[0], times[-1] + block_duration)
freq_range = (0, 1)
bins = X_inst_freqs.shape
# time_range = (0, 2)
# freq_range = (0, 0.4)
counts, x_edges, y_edges = np.histogram2d(
X_time.flatten(), X_inst_freqs.flatten(),
weights=weights.flatten(),
range=(time_range, freq_range),
bins=bins)
return counts, x_edges, y_edges
def process_spectrogram(filename, block_size, hop_size):
x, times, fs = open_file(filename, block_size, hop_size)
w = create_window(block_size)
X, X_cross_time, X_cross_freq, X_inst_freqs, X_group_delays = compute_spectra(x, w)
X_reassigned_f = requantize_f_spectrogram(X_cross_time, X_inst_freqs)
# N = X_cross.shape[1]
# magnitude_spectrum = abs(X_cross_time) / N
# weights = db_scale(magnitude_spectrum)
X_magnitudes = abs(X_cross_time) / X.shape[1]
weights = X_magnitudes
X_reassigned_tf = requantize_tf_spectrogram(X_group_delays, X_inst_freqs, times, block_size, fs, weights)[0]
X_reassigned_tf = db_scale(X_reassigned_tf ** 2)
image_filename = os.path.basename(filename).replace('.wav', '.png')
scipy.misc.imsave('reassigned_f_' + image_filename, real_half(X_reassigned_f).T[::-1])
scipy.misc.imsave('reassigned_tf_' + image_filename, real_half(X_reassigned_tf).T[::-1])
scipy.misc.imsave('normal_' + image_filename, real_half(X_magnitudes).T[::-1])
# X_time = X_group_delays + np.tile(np.arange(X.shape[0]).reshape(-1, 1), X.shape[1])
# idx = (abs(X).flatten() > 10) & (X_inst_freqs.flatten() < 0.5)
# plt.scatter(X_time.flatten()[idx], X_inst_freqs.flatten()[idx], alpha=0.1)
# plt.savefig('scatter_' + image_filename)
def reassigned_spectrogram(x, w, to_log=True):
X, X_cross_time, X_cross_freq, X_inst_freqs, X_group_delays = compute_spectra(x, w)
X_reassigned_f = requantize_f_spectrogram(X_cross_time, X_inst_freqs, to_log)
return real_half(X_reassigned_f)
def chromagram(x, w, fs, bin_range=(-48, 67), bin_division=1, to_log=True):
"complete reassigned spectrogram with requantization to pitch bins"
# TODO: better give frequency range
X, X_cross_time, X_cross_freq, X_inst_freqs, X_group_delays = compute_spectra(x, w)
n_blocks, n_freqs = X_cross_time.shape
X_mag = abs(X_cross_time) / n_freqs
weights = real_half(X_mag).flatten()
eps = np.finfo(np.float32).eps
pitch_bins = quantize_freqs_to_pitch_bins(np.maximum(fs * real_half(X_inst_freqs), eps), bin_division=bin_division).flatten()
nonzero_ix = abs(weights) > eps
X_chromagram = np.histogram2d(
np.repeat(np.arange(n_blocks), n_freqs / 2),
pitch_bins,
bins=(np.arange(n_blocks + 1),
np.arange(bin_range[0], bin_range[1] + 1, 1 / bin_division)),
weights=weights
)[0]
X_chromagram = X_chromagram ** 2
if to_log:
X_chromagram = db_scale(X_chromagram)
return X_chromagram
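def _chromagram_usage_sketch(filename):
    # Illustration only, mirroring the conventions of process_spectrogram();
    # the block and hop sizes are arbitrary example values.
    x, times, fs = open_file(filename, block_size=2048, hop_size=512)
    w = create_window(2048)
    return chromagram(x, w, fs)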
def tf_scatter():
    # Scratch/interactive helper: assumes X, X_time and X_inst_freqs already
    # exist in the calling scope (e.g. computed as in process_spectrogram).
    idx = (abs(X).flatten() > 10) & (X_inst_freqs.flatten() < 0.5)
    plt.scatter(X_time.flatten()[idx], X_inst_freqs.flatten()[idx], alpha=0.1)
def test_cross_spectrum():
a = np.array([1j, 1+3j])
b = np.array([2, 4j])
c = np.array([-2j, 12+4j])
assert_array_equals(cross_spectrum(a, b), c)
def test_shifted_amplitude_pair():
actual = shifted_amplitude_pair(np.array([1,2,3]))
assert_array_equals(actual[0], np.array([0, 1, 2]))
assert_array_equals(actual[1], np.array([1, 2, 3]))
def assert_array_equals(a, b):
assert (a == b).all()
if __name__ == '__main__':
import sys
process_spectrogram(filename=sys.argv[1], block_size=2048, hop_size=512)
| [
"[email protected]"
]
| |
6aaeff9c56d64847c2c21f107e952413f161274a | c8bdd4dc2153b77498bc5b9f7f2208712a556ea1 | /mb_aligner/dal/section.py | 7bec24c75c75c218863de3cb19530da9a5c11784 | [
"MIT"
]
| permissive | adisuissa/mb_aligner | ac0b9d567a7da70dcc60d570505080e3b75cec32 | 1c92ce7df862908ebaab9ded54985c2413b9d2c5 | refs/heads/master | 2023-07-19T21:08:56.716304 | 2019-09-11T13:49:58 | 2019-09-11T13:49:58 | 203,250,865 | 0 | 3 | MIT | 2023-07-06T21:05:45 | 2019-08-19T21:11:58 | Python | UTF-8 | Python | false | false | 13,527 | py | from collections import defaultdict
import os
import json
from mb_aligner.dal.tile import Tile
from mb_aligner.dal.mfov import Mfov
import csv
import numpy as np
import cv2
import subprocess
class Section(object):
"""
Represents a single section (at least one mfov) in the system
"""
def __init__(self, mfovs_dict, **kwargs):
self._mfovs_dict = mfovs_dict
# Initialize default values
self._layer = kwargs.get("layer", None)
self._bbox = kwargs.get("bbox", None)
# initialize values using kwargs
#elif self._mfovs_dict is not None and len(self._mfovs_dict) > 0:
# self._layer = self._mfovs_dict.values()[0].layer
self._wafer_section = kwargs.get("wafer_section", (None, None))
self._canonical_section_name = None
@classmethod
def create_from_tilespec(cls, tilespec, **kwargs):
"""
Creates a section from a given tilespec
"""
per_mfov_tiles = defaultdict(list)
min_xys = []
max_xys = []
#min_xy = np.array([np.finfo('float32').max, np.finfo('float32').max])
#max_xy = np.array([np.finfo('float32').min, np.finfo('float32').min])
for tile_ts in tilespec:
per_mfov_tiles[tile_ts["mfov"]].append(Tile.create_from_tilespec(tile_ts))
min_xys.append(tile_ts['bbox'][::2])
max_xys.append(tile_ts['bbox'][1::2])
min_xy = np.min(min_xys, axis=0)
max_xy = np.max(max_xys, axis=0)
bbox = [min_xy[0], max_xy[0], min_xy[1], max_xy[1]]
layer = int(tilespec[0]["layer"])
all_mfovs = {mfov_idx:Mfov(mfov_tiles_list) for mfov_idx, mfov_tiles_list in per_mfov_tiles.items()}
return Section(all_mfovs, layer=layer, bbox=bbox, **kwargs)
@classmethod
def _parse_coordinates_file(cls, input_file):
# Read the relevant mfovs tiles locations
images_dict = {}
images = []
x = []
y = []
# Instead of just opening the file, opening the sorted file, so the tiles will be arranged
sorted_lines = subprocess.check_output('cat "{}" | sort'.format(input_file), shell=True)
assert(len(sorted_lines) > 0)
sorted_lines = sorted_lines.decode('ascii').split('\r\n')
for line in sorted_lines:
line_data = line.split('\t')
img_fname = line_data[0].replace('\\', '/')
# Make sure that the mfov appears in the relevant mfovs
if not (img_fname.split('/')[0]).isdigit():
# skip the row
continue
img_sec_mfov_beam = '_'.join(img_fname.split('/')[-1].split('_')[:3])
# Make sure that no duplicates appear
if img_sec_mfov_beam not in images_dict.keys():
images.append(img_fname)
images_dict[img_sec_mfov_beam] = len(images) - 1
cur_x = float(line_data[1])
cur_y = float(line_data[2])
x.append(cur_x)
y.append(cur_y)
else:
# Either the image is duplicated, or a newer version was taken,
# so make sure that the newer version is used
prev_img_idx = images_dict[img_sec_mfov_beam]
prev_img = images[prev_img_idx]
prev_img_date = prev_img.split('/')[-1].split('_')[-1]
curr_img_date = img_fname.split('/')[-1].split('_')[-1]
if curr_img_date > prev_img_date:
images[prev_img_idx] = img_fname
                    # keep the index (not the filename) so a later duplicate can
                    # still look up its position in `images`
                    images_dict[img_sec_mfov_beam] = prev_img_idx
cur_x = float(line_data[1])
cur_y = float(line_data[2])
x[prev_img_idx] = cur_x
y[prev_img_idx] = cur_y
return images, np.array(x), np.array(y)
@classmethod
def create_from_full_image_coordinates(cls, full_image_coordinates_fname, layer, tile_size=None, relevant_mfovs=None, **kwargs):
"""
Creates a section from a given full_image_coordinates filename
"""
images, x_locs, y_locs = Section._parse_coordinates_file(full_image_coordinates_fname)
assert(len(images) > 0)
section_folder = os.path.dirname(full_image_coordinates_fname)
# Update tile_size if needed
if tile_size is None:
# read the first image
img_fname = os.path.join(section_folder, images[0])
img = cv2.imread(img_fname, 0)
tile_size = img.shape
# normalize the locations of all the tiles (reset to (0, 0))
x_locs -= np.min(x_locs)
y_locs -= np.min(y_locs)
# Create all the tiles
per_mfov_tiles = defaultdict(list)
for tile_fname, tile_x, tile_y, in zip(images, x_locs, y_locs):
tile_fname = os.path.join(section_folder, tile_fname)
# fetch mfov_idx, and tile_idx
split_data = os.path.basename(tile_fname).split('_')
mfov_idx = int(split_data[1])
if relevant_mfovs is not None and mfov_idx not in relevant_mfovs:
continue
tile_idx = int(split_data[2])
print('adding mfov_idx %d, tile_idx %d' % (mfov_idx, tile_idx))
tile = Tile.create_from_input(tile_fname, tile_size, (tile_x, tile_y), layer, mfov_idx, tile_idx)
per_mfov_tiles[mfov_idx].append(tile)
all_mfovs = {mfov_idx:Mfov(mfov_tiles_list) for mfov_idx, mfov_tiles_list in per_mfov_tiles.items()}
# Just take any tile's width and height to compute the max_x,y values
max_x = np.max(x_locs) + tile_size[1]
max_y = np.max(y_locs) + tile_size[0]
bbox = [0, max_x, 0, max_y]
return Section(all_mfovs, layer=layer, bbox=bbox, **kwargs)
@classmethod
def _parse_mfov_coordinates_file(cls, input_file):
# Read the relevant mfovs tiles locations
images_dict = {}
images = []
x = []
y = []
# Instead of just opening the file, opening the sorted file, so the tiles will be arranged
sorted_lines = subprocess.check_output('cat "{}" | sort'.format(input_file), shell=True)
assert(len(sorted_lines) > 0)
sorted_lines = sorted_lines.decode('ascii').split('\r\n')
for line in sorted_lines:
line_data = line.split('\t')
img_fname = line_data[0].replace('\\', '/')
if len(img_fname) == 0:
continue
img_sec_mfov_beam = '_'.join(img_fname.split('_')[:3])
# Make sure that no duplicates appear
if img_sec_mfov_beam not in images_dict.keys():
images.append(img_fname)
images_dict[img_sec_mfov_beam] = len(images) - 1
cur_x = float(line_data[1])
cur_y = float(line_data[2])
x.append(cur_x)
y.append(cur_y)
else:
# Either the image is duplicated, or a newer version was taken,
# so make sure that the newer version is used
prev_img_idx = images_dict[img_sec_mfov_beam]
prev_img = images[prev_img_idx]
prev_img_date = prev_img.split('_')[-1]
curr_img_date = img_fname.split('_')[-1]
if curr_img_date > prev_img_date:
images[prev_img_idx] = img_fname
                    # keep the index (not the filename) so a later duplicate can
                    # still look up its position in `images`
                    images_dict[img_sec_mfov_beam] = prev_img_idx
cur_x = float(line_data[1])
cur_y = float(line_data[2])
x[prev_img_idx] = cur_x
y[prev_img_idx] = cur_y
return images, np.array(x), np.array(y)
@classmethod
def create_from_mfovs_image_coordinates(cls, mfovs_image_coordinates_fnames, layer, tile_size=None, relevant_mfovs=None, **kwargs):
"""
Creates a section from multiple per-mfov image_coordinates filenames
"""
images = []
x_locs = []
y_locs = []
for mfov_image_coordinates_fname in mfovs_image_coordinates_fnames:
mfov_images, mfov_x_locs, mfov_y_locs = Section._parse_mfov_coordinates_file(mfov_image_coordinates_fname)
images.extend([os.path.join(os.path.dirname(mfov_image_coordinates_fname), mfov_image) for mfov_image in mfov_images])
x_locs.extend(mfov_x_locs)
y_locs.extend(mfov_y_locs)
assert(len(images) > 0)
# Update tile_size if needed
if tile_size is None:
# read the first image - assuming all files of the same shape
img_fname = images[0]
img = cv2.imread(img_fname, 0)
tile_size = img.shape
# normalize the locations of all the tiles (reset to (0, 0))
x_locs -= np.min(x_locs)
y_locs -= np.min(y_locs)
# Create all the tiles
per_mfov_tiles = defaultdict(list)
for tile_fname, tile_x, tile_y, in zip(images, x_locs, y_locs):
# fetch mfov_idx, and tile_idx
split_data = os.path.basename(tile_fname).split('_')
mfov_idx = int(split_data[1])
if relevant_mfovs is not None and mfov_idx not in relevant_mfovs:
continue
tile_idx = int(split_data[2])
print('adding mfov_idx %d, tile_idx %d' % (mfov_idx, tile_idx))
tile = Tile.create_from_input(tile_fname, tile_size, (tile_x, tile_y), layer, mfov_idx, tile_idx)
per_mfov_tiles[mfov_idx].append(tile)
all_mfovs = {mfov_idx:Mfov(mfov_tiles_list) for mfov_idx, mfov_tiles_list in per_mfov_tiles.items()}
# Just take any tile's width and height to compute the max_x,y values
max_x = np.max(x_locs) + tile_size[1]
max_y = np.max(y_locs) + tile_size[0]
bbox = [0, max_x, 0, max_y]
return Section(all_mfovs, layer=layer, bbox=bbox, **kwargs)
@property
def layer(self):
"""
Returns the section layer number
"""
return self._layer
@property
def tilespec(self):
"""
Returns a tilespec representation of the section
"""
ret = []
# Order the mfovs by the mfov index
sorted_mfov_idxs = sorted(self._mfovs_dict.keys())
for mfov_idx in sorted_mfov_idxs:
ret.extend(self._mfovs_dict[mfov_idx].tilespec)
return ret
@property
def bbox(self):
"""
Returns the bounding box [min_x, max_x, min_y, max_y] of the section
"""
# TODO - update the section bounding box when the tile's bounding boxes are updated
return self._bbox
@property
def wafer_section(self):
"""
Returns a tuple of (wafer, section) of the original acquired section (set by the microscope software).
Note that if the user didn't supply it, it is set to (None, None).
"""
return self._wafer_section
@property
def canonical_section_name(self):
"""
Returns a canonical output name for the section (no suffix).
The canonical name will be:
[layer]_W[wafer]_Sec[section], where layer is 4 digits, wafer is 2 digits, and section is 3 digits.
"""
if self._canonical_section_name is None:
assert(self._layer is not None)
wafer, section = self._wafer_section
self._canonical_section_name = '{}_W{}_Sec{}'.format(str(self._layer).zfill(4), str(wafer).zfill(2), str(section).zfill(3))
return self._canonical_section_name
@property
def canonical_section_name_no_layer(self):
"""
Returns a canonical output name for the section (no suffix) w/o the layer prefix.
The result name will be:
W[wafer]_Sec[section], where wafer is 2 digits, and section is 3 digits.
"""
return self.canonical_section_name[5:]
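    # Example: layer=7, wafer=3, section=12 gives
    # canonical_section_name == '0007_W03_Sec012' and
    # canonical_section_name_no_layer == 'W03_Sec012'.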
def save_as_json(self, out_fname):
"""
Saves the section as a tilespec
"""
with open(out_fname, 'w') as out_f:
json.dump(self.tilespec, out_f, sort_keys=True, indent=4)
def get_mfov(self, mfov_idx):
'''
Returns the mfov of the given mfov_idx
'''
return self._mfovs_dict[mfov_idx]
def mfovs(self):
'''
A generator that iterates over all the mfovs in the section
'''
mfov_keys = sorted(self._mfovs_dict.keys())
for mfov_idx in mfov_keys:
yield self._mfovs_dict[mfov_idx]
@property
def mfovs_num(self):
"""
Returns the number of mfovs in the section.
"""
return len(self._mfovs_dict)
def tiles(self):
'''
A generator that iterates over all the tiles in the section
'''
for mfov in self.mfovs():
for tile in mfov.tiles():
yield tile
def remove_tile(self, mfov_index, tile_index):
'''
Removes a single tile from the section.
'''
mfov = self._mfovs_dict[mfov_index]
mfov.remove_tile(tile_index)
if __name__ == '__main__':
section = Section.create_from_full_image_coordinates('/n/home10/adisuis/Harvard/git/rh_aligner/tests/ECS_test9_cropped/images/010_S10R1/full_image_coordinates.txt', 5)
for mfov in section.mfovs():
print("Mfov idx: %d" % mfov.mfov_index)
for tile in section.tiles():
print("Tile idx: %d (mfov %d)" % (tile.tile_index, tile.mfov_index))
| [
"[email protected]"
]
| |
aa58c72a61686c3fcbfc652c3ea34db79cf29d43 | ec5c35ac5163c4e81262a81a6a6c46667c01733d | /server/auth.py | b279dc4faf0008a333cb253cdb50ed329e219f6b | []
| no_license | kotawiw/bytedance-exercise-2 | 27b32d81aa7e8040c1c8448acbe9c4ff20ff5b26 | 8db190487a6490ec852d8418d93ba62251a5437f | refs/heads/master | 2022-12-24T00:04:53.047395 | 2020-09-23T11:48:13 | 2020-09-23T11:48:13 | 297,948,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py | import functools
from flask import Blueprint
from flask import g
from flask import request
from flask import session
from flask import abort
from flask import jsonify
from server.models.users import User
bp = Blueprint("auth", __name__, url_prefix="/api/auth")
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if 'user' not in g:
return abort(401, "Login required")
return view(**kwargs)
return wrapped_view
@bp.before_app_request
def load_logged_in_user():
user_id = session.get("user_id")
if user_id:
g.user = User.query.get(user_id)
@bp.route("/status")
def get_status():
if 'user' not in g:
return jsonify({"loggedIn": False})
return jsonify({
"loggedIn": True,
"email": g.user.email
})
@bp.route("/register", methods=("POST",))
def register():
autologin = request.args.get("autologin", default="True").lower() == "true"
email = request.json.get('email')
password = request.json.get('password')
user = User.try_register(email, password)
if not user:
return abort(400, "Invalid email or password")
if autologin:
session["user_id"] = user.id
return jsonify({'email': user.email, 'loggedIn': autologin})
@bp.route("/login", methods=("POST",))
def login():
email = request.json['email']
password = request.json['password']
user = User.by_email_password(email, password)
if not user:
return abort(401, "Incorrect email or password")
session["user_id"] = user.id
return jsonify({'email': user.email, 'loggedIn': True})
@bp.route("/logout")
def logout():
user = None
if 'user' in g:
user = g.user
session.clear()
if not user:
return jsonify({"loggedIn": False})
return jsonify({
"loggedIn": True,
"email": user.email
})
| [
"[email protected]"
]
| |
0053a7079faf9fa027992cb3f4a7f048e6e8bee4 | 0e2768a4a21367c4c2f89976f2dadd5c94fec09b | /StarGAN/age_test.py | b6cf116b145149480910e3303ec1f6fc45ca770a | []
| no_license | xogus1107/Capstone | 1dcea5d7fd1d603c8b31133d7a64325fee8b72ed | e84fd5a2e5419da969a8013a4ee0b98b4c1d22cf | refs/heads/master | 2020-03-30T03:47:23.028610 | 2018-12-11T02:53:33 | 2018-12-11T02:53:33 | 150,708,066 | 1 | 1 | null | 2018-12-11T02:53:34 | 2018-09-28T08:16:27 | Python | UTF-8 | Python | false | false | 3,984 | py | import os
import argparse
from solver import Solver
from data_loader import get_loader
from torch.backends import cudnn
def str2bool(v):
return v.lower() in ('true')
def main(config):
# For fast training.
cudnn.benchmark = True
# Create directories if not exist.
if not os.path.exists(config.log_dir):
os.makedirs(config.log_dir)
if not os.path.exists(config.model_save_dir):
os.makedirs(config.model_save_dir)
if not os.path.exists(config.sample_dir):
os.makedirs(config.sample_dir)
if not os.path.exists(config.result_dir):
os.makedirs(config.result_dir)
# Data loader.
data_loader = get_loader(config.image_dir, config.crop_size, config.image_size, config.batch_size,
'test', config.num_workers)
# Solver for training and testing StarGAN.
solver = Solver(data_loader, config)
solver.test()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model configuration.
parser.add_argument('--c_dim', type=int, default=10, help='dimension of domain labels')
parser.add_argument('--crop_size', type=int, default=128, help='crop size for the dataset')
parser.add_argument('--image_size', type=int, default=128, help='image resolution')
parser.add_argument('--g_conv_dim', type=int, default=64, help='number of conv filters in the first layer of G')
parser.add_argument('--d_conv_dim', type=int, default=64, help='number of conv filters in the first layer of D')
parser.add_argument('--g_repeat_num', type=int, default=6, help='number of residual blocks in G')
parser.add_argument('--d_repeat_num', type=int, default=6, help='number of strided conv layers in D')
parser.add_argument('--lambda_cls', type=float, default=1, help='weight for domain classification loss')
parser.add_argument('--lambda_rec', type=float, default=10, help='weight for reconstruction loss')
parser.add_argument('--lambda_gp', type=float, default=10, help='weight for gradient penalty')
# Training configuration.
parser.add_argument('--batch_size', type=int, default=16, help='mini-batch size')
parser.add_argument('--num_iters', type=int, default=1000000, help='number of total iterations for training D')
parser.add_argument('--num_iters_decay', type=int, default=100000, help='number of iterations for decaying lr')
parser.add_argument('--g_lr', type=float, default=0.0001, help='learning rate for G')
parser.add_argument('--d_lr', type=float, default=0.0001, help='learning rate for D')
parser.add_argument('--n_critic', type=int, default=5, help='number of D updates per each G update')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for Adam optimizer')
parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam optimizer')
parser.add_argument('--resume_iters', type=int, default=None, help='resume training from this step')
# Test configuration.
parser.add_argument('--test_iters', type=int, default=1000000, help='test model from this step')
# Miscellaneous.
parser.add_argument('--num_workers', type=int, default=1)
parser.add_argument('--use_tensorboard', type=str2bool, default=True)
# Directories.
parser.add_argument('--image_dir', type=str, default='age/test')
parser.add_argument('--log_dir', type=str, default='age/logs')
parser.add_argument('--model_save_dir', type=str, default='age/models')
parser.add_argument('--sample_dir', type=str, default='age/samples')
parser.add_argument('--result_dir', type=str, default='age/results')
# Step size.
parser.add_argument('--log_step', type=int, default=10)
parser.add_argument('--sample_step', type=int, default=1000)
parser.add_argument('--model_save_step', type=int, default=10000)
parser.add_argument('--lr_update_step', type=int, default=1000)
config = parser.parse_args()
print(config)
main(config)
| [
"[email protected]"
]
| |
b3aa4d9fb003f4ac0049040ec5cd7a6d3f657b93 | 2c4a2790457a2c16c9c92bc2e6feeb6cc9271994 | /laptop/catkin_ws/build/turtlebot3/turtlebot3_pointop/catkin_generated/pkg.installspace.context.pc.py | 2bf9e1ccc9e204260bada0f003da4a1983ed2144 | []
| no_license | Zoltan3057/skalman | df37e6f376665363b6ea73c6c16fe8312e473e07 | 0e4cead39f6328c74622dd11688837b77152ff17 | refs/heads/master | 2021-09-22T22:26:07.265321 | 2018-09-17T20:47:05 | 2018-09-17T20:47:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy;geometry_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_pointop"
PROJECT_SPACE_DIR = "/home/fregu856/skalman/laptop/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
]
| |
50c6172473e34cff7b5a5a941eb2be17cd93f182 | b4e4ce76c56652a41b7f89070e20168851152ed1 | /core/solver_ddpg.py | 6bc6f0cef9f448ff783fd97c48097fbf80417cf7 | [
"MIT"
]
| permissive | electryone/deeprl | acb6a21b517344e353c9776e600e8bc0ac4d962b | 449a2f6e31b0e03b60a465d241cca953a0bd9598 | refs/heads/master | 2020-03-28T20:28:31.849650 | 2017-08-30T20:08:56 | 2017-08-30T20:08:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,569 | py | # -------------------------------------------------------------------#
# Released under the MIT license (https://opensource.org/licenses/MIT)
# Contact: [email protected]
# Enhancement Copyright 2017, Mrinal Haloi
# -------------------------------------------------------------------#
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import time
from models.ddpg_model import ActorModel, CriticModel
from core.base import Base
from utils import utils
import core.logger as log
class SolverDDPG(Base):
def __init__(self, cfg, environment, sess, model_dir, **kwargs):
self.s_dim = environment.state_dim
self.a_dim = environment.action_dim
self.inputs_actor = tf.placeholder(
'float32', [None, self.s_dim], name='inputs_actor')
self.target_inputs_actor = tf.placeholder(
'float32', [None, self.s_dim], name='target_inputs_actor')
self.inputs_critic = tf.placeholder(
'float32', [None, self.s_dim], name='inputs_critic')
self.target_inputs_critic = tf.placeholder(
'float32', [None, self.s_dim], name='target_inputs_critic')
self.actions = tf.placeholder(
'float32', [None, self.a_dim], name='actions')
self.target_actions = tf.placeholder(
'float32', [None, self.a_dim], name='target_actions')
self.target_q_t = tf.placeholder('float32', [None], name='target_q_t')
self.learning_rate = tf.placeholder(
'float32', shape=[], name="learning_rate_placeholder")
super(SolverDDPG, self).__init__(cfg, environment, sess,
model_dir, state='simple', state_dim=self.s_dim, **kwargs)
def train(self):
start_time = time.time()
num_game, self.update_count, ep_reward = 0, 0, 0.
total_reward, self.total_loss, self.total_q = 0., 0., 0.
max_avg_ep_reward = 0
ep_rewards, actions = [], []
screen, reward, action, terminal = self.env.new_game()
self.optim_actor, self.end_points_actor, self.end_points_target_actor = self.tower_loss_actor(
self.inputs_actor, self.target_inputs_actor, actor_name='actor/main_')
self.optim_critic, self.loss_critic, self.end_points_critic, self.end_points_target_critic = self.tower_loss_critic(
self.inputs_critic, self.target_inputs_critic, self.actions, self.target_actions, critic_name='critic/main_')
tvariables_actor = [
var for var in tf.trainable_variables() if var.name.startswith('actor/')]
tvariables_critic = [
var for var in tf.trainable_variables() if var.name.startswith('critic/')]
self.targetops_actor = self.update_target_graph(
tvariables_actor, self.cfg.tau, main_name='actor/main_', target_name='actor/target_')
self.targetops_critic = self.update_target_graph(
tvariables_critic, self.cfg.tau, main_name='critic/main_', target_name='critic/target_')
self.saver = tf.train.Saver(max_to_keep=None)
init = tf.global_variables_initializer()
self.sess.run(init)
start_step = self.step_op.eval()
i = np.random.randint(11)
j = np.random.randint(19)
first_input = np.reshape(screen, (1, 3)) + (1. / (1. + i + j))
action = self.predict(self.end_points_actor[
'scaled_out'], first_input, agent_type='actor')
for _ in range(self.cfg.history_length):
self.history.add(screen)
for self.step in tqdm(range(start_step, self.cfg.max_step), ncols=70, initial=start_step):
if self.step == self.cfg.learn_start:
num_game, self.update_count, ep_reward = 0, 0, 0.
total_reward, self.total_loss, self.total_q = 0., 0., 0.
ep_rewards, actions = [], []
self.updated_lr = self.lr_policy.initial_lr
ep = (self.cfg.ep_end + max(0., (self.cfg.ep_start - self.cfg.ep_end) *
(self.cfg.ep_end_t - max(0., self.step - self.cfg.learn_start)) / self.cfg.ep_end_t))
# 1. predict
action = self.predict(self.end_points_actor[
'scaled_out'], self.history.get(), ep=ep, agent_type='actor')
# 2. act
screen, reward, terminal = self.env.act(action, is_training=True)
# 3. observe
self.observe(np.reshape(screen, self.s_dim), reward,
np.reshape(action[0], self.a_dim), terminal)
if terminal:
screen, reward, action, terminal = self.env.new_random_game()
num_game += 1
ep_rewards.append(ep_reward)
ep_reward = 0.
else:
ep_reward += reward
actions.append(action)
total_reward += reward
if self.step >= self.cfg.learn_start:
if self.step % self.cfg.test_step == self.cfg.test_step - 1:
avg_reward = total_reward / self.cfg.test_step
avg_loss = self.total_loss / self.update_count
avg_q = self.total_q / self.update_count
try:
max_ep_reward = np.max(ep_rewards)
min_ep_reward = np.min(ep_rewards)
avg_ep_reward = np.mean(ep_rewards)
except:
max_ep_reward, min_ep_reward, avg_ep_reward = 0, 0, 0
log.info('\navg_r: %.4f, avg_l: %.6f, avg_q: %3.6f, avg_ep_r: %.4f, max_ep_r: %.4f, min_ep_r: %.4f, # game: %d'
% (avg_reward, avg_loss, avg_q, avg_ep_reward, max_ep_reward, min_ep_reward, num_game))
if max_avg_ep_reward * 0.9 <= avg_ep_reward:
self.step_assign_op.eval(
{self.step_input: self.step + 1})
utils.save_model(self.saver, self.sess,
self.model_dir, self.step + 1)
max_avg_ep_reward = max(
max_avg_ep_reward, avg_ep_reward)
num_game = 0
total_reward = 0.
self.total_loss = 0.
self.total_q = 0.
self.update_count = 0
ep_reward = 0.
ep_rewards = []
actions = []
end_time = time.time()
        log.info('Total training time %6.1fs' % (end_time - start_time))
def observe(self, screen, reward, action, terminal):
reward = max(self.cfg.min_reward, min(self.cfg.max_reward, reward))
self.history.add(screen)
self.memory.add(screen, reward, action, terminal)
if self.step > self.cfg.learn_start:
if self.step % self.cfg.train_frequency == 0:
self.train_mini_batch()
if self.step % self.cfg.target_q_update_step == self.cfg.target_q_update_step - 1:
self.update_target(self.targetops_actor, self.sess)
self.update_target(self.targetops_critic, self.sess)
def train_mini_batch(self):
if self.memory.count < self.cfg.history_length:
return
else:
s_t, action, reward, s_t_plus_1, terminal = self.memory.sample_simple()
ep = (self.cfg.ep_end + max(0., (self.cfg.ep_start - self.cfg.ep_end) *
(self.cfg.ep_end_t - max(0., self.step - self.cfg.learn_start)) / self.cfg.ep_end_t))
action_s_t_plus_1 = self.predict_target(self.end_points_target_actor[
'scaled_out'], s_t_plus_1, ep=ep, agent_type='actor')
target_q = self.end_points_target_critic['out'].eval(
{self.target_inputs_critic: s_t_plus_1, self.target_actions: action_s_t_plus_1})
terminal = np.array(terminal) + 0.
reward = np.reshape(np.array(reward), (32, 1))
# target_q_t = (1. - terminal) * self.cfg.discount * target_q + reward
target_q_t = self.cfg.discount * target_q + reward
_, q_t, loss = self.sess.run([self.optim_critic, self.end_points_critic['out'], self.loss_critic], {
self.predicted_q_value: target_q_t,
self.actions: action,
self.inputs_critic: s_t,
self.learning_rate: self.updated_lr})
action_out = self.predict(self.end_points_actor[
'scaled_out'], s_t, ep=ep, agent_type='actor', is_train_loop=True)
s_t = np.asarray(s_t, np.float32)
action_out = np.asarray(action_out, np.float32)
a_grads = self.sess.run(
self.action_grads, {self.inputs_critic: s_t, self.actions: action_out})
tmpp = self.sess.run(self.optim_actor, {self.inputs_actor: s_t, self.action_gradients: a_grads[
0], self.learning_rate: self.updated_lr})
# self.writer.add_summary(summary_str, self.step)
self.total_loss += loss
self.total_q += q_t.mean()
self.update_count += 1
def tower_loss_actor(self, inputs, target_inputs, actor_name='actor/main_'):
model_actor = ActorModel()
model_target_actor = ActorModel()
end_points_actor = model_actor.model_def(inputs, self.env, name='main_')
# Target Network
end_points_target_actor = model_target_actor.model_def(
target_inputs, self.env, name='target_')
# This gradient will be provided by the critic network
self.action_gradients = tf.placeholder(tf.float32, [None, self.a_dim])
# Combine the gradients here
self.actor_model_params = [
var for var in tf.trainable_variables() if var.name.startswith(actor_name)]
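        # Deterministic policy gradient sketch: for actor parameters theta,
        #   grad_theta J ~= (dQ(s, a)/da at a=mu(s)) * (d mu(s; theta)/d theta)
        # action_gradients carries dQ/da from the critic; passing its negative as
        # grad_ys below makes the minimizing optimizer perform ascent on Q.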
self.actor_gradients = tf.gradients(
end_points_actor['scaled_out'], self.actor_model_params, -self.action_gradients)
# Optimization Op
opt = self.optimizer(self.learning_rate, optname=self.cfg.TRAIN.optname)
optim = opt.apply_gradients(
zip(self.actor_gradients, self.actor_model_params))
return optim, end_points_actor, end_points_target_actor
def tower_loss_critic(self, inputs, target_inputs, actions, target_actions, critic_name='critic/main_'):
model_critic = CriticModel()
model_target_critic = CriticModel()
end_points_critic = model_critic.model_def(
inputs, actions, name='main_')
# Target Network
end_points_target_critic = model_target_critic.model_def(
target_inputs, target_actions, self.env, name='target_')
# This gradient will be provided by the critic network
self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])
loss = tf.reduce_mean(
tf.square(self.predicted_q_value - end_points_critic['out']))
# Optimization Op
opt = self.optimizer(self.learning_rate, optname=self.cfg.TRAIN.optname)
self.critic_model_params = [
var for var in tf.trainable_variables() if var.name.startswith(critic_name)]
self.critic_gradients_vars = opt.compute_gradients(
loss, self.critic_model_params)
optim = opt.apply_gradients(self.critic_gradients_vars)
# Get the gradient of the net w.r.t. the action
self.action_grads = tf.gradients(end_points_critic['out'], actions)
return optim, loss, end_points_critic, end_points_target_critic
| [
"[email protected]"
]
| |
1ccb569c8cd950c90ba8af01abc664229472ddcc | 172c5da69ed8914dc899f65be8716e0fac55249b | /surveys/views.py | d8838fa92b0dc94e0cbbf96fee6d71ae1036af1a | []
| no_license | TheProrok29/django_questionnaires | cd75f0271e3b1012351c94b0fde4cb87f795ef5c | 2e62bf2e96fa58f96b80c84c04d08825dfb9ac37 | refs/heads/master | 2020-05-01T05:11:52.268230 | 2019-04-07T19:44:30 | 2019-04-07T19:44:30 | 177,295,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,222 | py | from django.shortcuts import render, HttpResponseRedirect
from .models import Survey, Question, Answer
from django.contrib.auth.decorators import login_required
from . import forms
from django.urls import reverse
from django.contrib import messages
@login_required
def surveys(request):
kwargs = {}
kwargs['surveys'] = Survey.objects.filter(user=request.user)
return render(request, 'surveys.html', kwargs)
@login_required
def create(request):
if request.method == 'GET':
kwargs = {}
kwargs['survey_creation_form'] = forms.SurveyCreationForm(
prefix='survey_creation_form')
return render(request, 'create.html', kwargs)
elif request.method == 'POST':
form = forms.SurveyCreationForm(data=request.POST,
prefix='survey_creation_form')
if form.is_valid():
new_survey = form.save(commit=False)
new_survey.user = request.user
new_survey.save()
messages.success(
request, 'Ankieta została utworzona, możesz przystąpić do tworzenia pytań.')
return HttpResponseRedirect(reverse('survey',
kwargs={
'survey_id': new_survey.id
}))
messages.error(request, 'Niepoprawne wywołanie trasy.')
return HttpResponseRedirect(reverse('surveys'))
@login_required
def survey(request, survey_id):
try:
user_survey = Survey.objects.get(id=survey_id)
if request.method == 'GET':
kwargs = {}
kwargs['survey'] = user_survey
kwargs['questions'] = Question.objects.filter(survey=user_survey)
kwargs['survey_edit_form'] = forms.SurveyCreationForm(prefix='survey_creation_form',
instance=user_survey)
return render(request, 'survey.html', kwargs)
elif request.method == 'POST':
form = forms.SurveyCreationForm(data=request.POST,
prefix='survey_creation_form',
instance=user_survey)
if form.is_valid():
form.save()
messages.success(request, 'Dane ankiety zostały zmienione.')
return HttpResponseRedirect(reverse('survey',
kwargs={
'survey_id': survey_id
}))
except Survey.DoesNotExist:
messages.error(request, 'Wybrana ankieta nie istnieje.')
return HttpResponseRedirect(reverse('surveys'))
@login_required
def delete(request, survey_id):
try:
user_survey = Survey.objects.get(id=survey_id)
user_survey.delete()
messages.success(request, 'Wybrana ankieta została usunięta.')
return HttpResponseRedirect(reverse('surveys'))
except Survey.DoesNotExist:
messages.error(request, 'Wybrana ankieta nie istnieje.')
return HttpResponseRedirect(reverse('surveys'))
@login_required
def create_question(request, survey_id):
try:
user_survey = Survey.objects.get(id=survey_id)
if request.method == 'GET':
kwargs = {}
kwargs['survey'] = user_survey
kwargs['question_creation_form'] = forms.QuestionCreationForm(
prefix='question_creation_form')
return render(request, 'question_create.html', kwargs)
elif request.method == 'POST':
form = forms.QuestionCreationForm(data=request.POST,
prefix='question_creation_form')
if form.is_valid():
new_question = form.save(commit=False)
new_question.survey = user_survey
new_question.save()
messages.success(request, 'Pytanie zostało utworzone.')
return HttpResponseRedirect(reverse('survey',
kwargs={
'survey_id': user_survey.id
}))
except Survey.DoesNotExist:
messages.error(request, 'Wybrana ankieta nie istnieje.')
return HttpResponseRedirect(reverse('surveys'))
@login_required
def delete_question(request, survey_id, question_id):
try:
survey_question = Question.objects.get(id=question_id)
survey_question.delete()
messages.success(request, 'Wybrane pytanie zostało usunięte.')
except Question.DoesNotExist:
messages.error(request, 'Wybrane pytanie nie istnieje.')
return HttpResponseRedirect(reverse('survey', kwargs={
'survey_id': survey_id
}))
def share(request, survey_id):
try:
kwargs = {}
user_survey = Survey.objects.get(id=survey_id)
survey_questions = Question.objects.filter(survey_id=survey_id)
if request.method == 'GET':
kwargs['survey'] = user_survey
kwargs['questions'] = survey_questions
return render(request, 'share.html', kwargs)
elif request.method == 'POST':
first_name = request.POST['first-name']
answers = "<p>"
for question in survey_questions:
answers += 'Pytanie: %s <br /> Odpowiedź: <em>%s' % (question.name,
request.POST.get(str(question.id), 'Brak'))
answers += '</em><br /><br />'
answers += '</p>'
new_answer = Answer()
new_answer.user = user_survey.user
new_answer.survey = user_survey
new_answer.first_name = first_name
new_answer.answers = answers
new_answer.save()
messages.success(
request, 'Dziękujemy, Twoje odpowiedzi zostały przesłane.')
return HttpResponseRedirect(reverse('share-survey',
kwargs={
'survey_id': user_survey.id
}))
except Survey.DoesNotExist:
messages.error(request, 'Wybrana ankieta nie istnieje.')
return HttpResponseRedirect(reverse('home'))
@login_required
def answers(request, survey_id):
kwargs = {}
kwargs['answers'] = Answer.objects.filter(survey_id=survey_id).order_by('-created')
return render(request, 'answers.html', kwargs)
@login_required
def delete_answer(request, survey_id, answer_id):
try:
survey_answer = Answer.objects.get(id=answer_id)
survey_answer.delete()
messages.success(request, 'Wybrana odpowiedź została usunięta.')
except Answer.DoesNotExist:
        messages.error(request, 'Wybrana odpowiedź nie istnieje.')
return HttpResponseRedirect(reverse('answers', kwargs={
'survey_id': survey_id
})) | [
"[email protected]"
]
| |
73e4c6bd217f7949adbe8e395dd344f590caaa88 | 57e37e1df9596cdaf86d3dc81cfbe15c5303157f | /SocialMediaenv/social_media/login_app/migrations/0007_auto_20200625_0059.py | b85ab48b079026324aa416a8e4f4f1f51ed52538 | []
| no_license | Tanzin-Ul-Islam/Django_Social_Media | 45d9240b25456768b4cc1976f4971c7921a6ceae | 7578aeb98702bbbb6ae820de11c2c5a4c237243a | refs/heads/master | 2022-11-29T11:37:07.949118 | 2020-08-10T20:15:45 | 2020-08-10T20:15:45 | 286,519,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | # Generated by Django 3.0.7 on 2020-06-24 18:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('login_app', '0006_auto_20200625_0056'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='profilepic',
field=models.ImageField(blank=True, upload_to='userpics'),
),
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user_profile', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
]
| |
90593ff711ea932ec1a2f7a1ec6b49f285920849 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_75/1267.py | 24da43f932ff6c1352b5a0a632af37122acf1eab | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,444 | py | import unittest
def cut(s, size):
for i in range(0, len(s), size):
yield s[i:i + size]
def parse_combining(s):
d = {}
for mom, dad, kiddo in cut(s, 3):
d[frozenset((mom, dad))] = kiddo
return d
def parse_opposing(s):
l = []
for mom, dad in cut(s, 2):
l.append(frozenset((mom, dad)))
return l
def invoke(s, combining, opposing):
combining_pairs = frozenset(combining.keys())
opposing_pairs = frozenset(opposing)
result = ""
for c in s:
result += c
# print result
while len(result) >= 2:
pair = frozenset((result[-1], result[-2]))
if pair in combining_pairs:
# print "*", pair
result = result[:-2] + combining[pair]
continue
fs = frozenset(result)
for pair in opposing_pairs:
if fs >= pair:
# print "**", pair
result = ""
continue
break
return result
class TestMagicka(unittest.TestCase):
def assertInvoke(self, output, combining, opposing, input):
c = parse_combining(combining)
o = parse_opposing(opposing)
self.assertEqual(output, invoke(input, c, o))
def test_cut(self):
self.assertEqual(["12", "34", "56"], [s for s in cut("123456", 2)])
def test_parse(self):
self.assertEqual({frozenset('QR'):'I'}, parse_combining("QRI"))
def test_all(self):
self.assertInvoke("EA", "", "", "EA")
self.assertInvoke("RIR", "QRI", "", "RRQR")
self.assertInvoke("FDT", "QFT", "QF", "FAQFDFQ")
self.assertInvoke("ZERA", "EEZ", "QE", "QEEEERA")
self.assertInvoke("", "", "QW", "QW")
self.assertInvoke("", "", "QW", "WQ")
self.assertInvoke("CF", "ABCDEF", "XY", "XYABDE")
self.assertInvoke("G", "ABCDEFCFG", "XY", "XYABDE")
if __name__ == '__main__':
# unittest.main()
count = int(raw_input())
for i in range(count):
it = iter(raw_input().split(" "))
combining = parse_combining(it.next() if int(it.next()) > 0 else "")
opposing = parse_opposing(it.next() if int(it.next()) > 0 else "")
s = it.next() if int(it.next()) > 0 else ""
print 'Case #%d: [%s]' % (i + 1, ", ".join(invoke(s, combining, opposing)))
| [
"[email protected]"
]
| |
a3e3b87d073f41bae0250078bf35cd961afc03ef | 49f61714a6f78d984fd2194d6064d84e891bc5b7 | /2019-1/230/users/3356/codes/1599_842.py | 15af2a9064e98315942e60e03b8d7ff9b43b637d | []
| no_license | psbarros/Variaveis3 | b5c4e1517e7d94a846ee03791d25d5821a1c651c | 3dcf6f810709ce03c78335acf9533e008a2ae125 | refs/heads/master | 2023-06-13T07:05:00.878430 | 2021-07-06T17:51:37 | 2021-07-06T17:51:37 | 383,549,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | # Teste seu codigo aos poucos.
# Nao teste tudo no final, pois fica mais dificil de identificar erros.
# Nao se intimide com as mensagens de erro. Elas ajudam a corrigir seu codigo.
n = int(input())
soma = 0
while (n > 0):
resto = n % 10
n = n // 10
soma = soma + resto
print(soma)
| [
"[email protected]"
]
| |
a23d0a0133f0e15711d6f9797955758dc75ae16e | 4148260054c2cf4605dacb8bdef3605c82eca470 | /temboo/Library/Wordnik/Words/RandomWords.py | 682599bdf169426d581dbe717544f4ca5ebb9bb6 | []
| no_license | wimsy/actuarize-web | 0f23d5f00afe3d36d430621cdb497d2e64998416 | 5f43af3019da6fb08cafeec9ff0a89df5196b864 | refs/heads/master | 2021-03-12T19:38:21.887681 | 2012-12-19T01:13:50 | 2012-12-19T01:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,155 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# RandomWords
# Retrieves a list of random words.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class RandomWords(Choreography):
"""
Create a new instance of the RandomWords Choreography. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
def __init__(self, temboo_session):
Choreography.__init__(self, temboo_session, '/Library/Wordnik/Words/RandomWords')
def new_input_set(self):
return RandomWordsInputSet()
def _make_result_set(self, result, path):
return RandomWordsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return RandomWordsChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the RandomWords
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class RandomWordsInputSet(InputSet):
"""
Set the value of the APIKey input for this choreography. ((required, string) The API Key from Wordnik.)
"""
def set_APIKey(self, value):
InputSet._set_input(self, 'APIKey', value)
"""
Set the value of the ExcludePartOfSpeech input for this choreography. ((optional, string) Excludes the specified comma-delimited parts of speech from the results returned. Acceptable values include: adjective, noun, etc. See docs for full list.)
"""
def set_ExcludePartOfSpeech(self, value):
InputSet._set_input(self, 'ExcludePartOfSpeech', value)
"""
Set the value of the HasDefinition input for this choreography. ((optional, string) Only returns words that have dictionary definitions when true. Otherwise false. Defaults to true.)
"""
def set_HasDefinition(self, value):
InputSet._set_input(self, 'HasDefinition', value)
"""
Set the value of the IncludePartOfSpeech input for this choreography. ((optional, string) Only includes the specified comma-delimited parts of speech. Acceptable values include: adjective, noun, etc. See docs for full list.)
"""
def set_IncludePartOfSpeech(self, value):
InputSet._set_input(self, 'IncludePartOfSpeech', value)
"""
Set the value of the Limit input for this choreography. ((optional, integer) Maximum number of results to return. Defaults to 10.)
"""
def set_Limit(self, value):
InputSet._set_input(self, 'Limit', value)
"""
Set the value of the MaxCorpus input for this choreography. ((optional, integer) Results include a corpus frequency count for each word returned. When this input is specified, results are limited to words with a corpus frequency count below the given number.)
"""
def set_MaxCorpus(self, value):
InputSet._set_input(self, 'MaxCorpus', value)
"""
Set the value of the MaxDictionaries input for this choreography. ((optional, integer) Maximum number of dictionaries in which the words appear.)
"""
def set_MaxDictionaries(self, value):
InputSet._set_input(self, 'MaxDictionaries', value)
"""
Set the value of the MaxLength input for this choreography. ((optional, integer) Maximum word length.)
"""
def set_MaxLength(self, value):
InputSet._set_input(self, 'MaxLength', value)
"""
Set the value of the MinCorpus input for this choreography. ((optional, integer) Results include a corpus frequency count for each word returned. When this input is specified, results are limited to words with a corpus frequency count above the given number.)
"""
def set_MinCorpus(self, value):
InputSet._set_input(self, 'MinCorpus', value)
"""
Set the value of the MinDictionaries input for this choreography. ((optional, integer) Minimum number of dictionaries in which the words appear.)
"""
def set_MinDictionaries(self, value):
InputSet._set_input(self, 'MinDictionaries', value)
"""
Set the value of the MinLength input for this choreography. ((optional, integer) Minimum word length.)
"""
def set_MinLength(self, value):
InputSet._set_input(self, 'MinLength', value)
"""
Set the value of the ResponseType input for this choreography. ((optional, string) Response can be either JSON or XML. Defaults to JSON.)
"""
def set_ResponseType(self, value):
InputSet._set_input(self, 'ResponseType', value)
"""
Set the value of the SortBy input for this choreography. ((optional, string) Results can be sorted by: alpha, count, or length.)
"""
def set_SortBy(self, value):
InputSet._set_input(self, 'SortBy', value)
"""
Set the value of the SortOrder input for this choreography. ((optional, string) Indicate the order to sort, either asc (ascending) or desc (descending).)
"""
def set_SortOrder(self, value):
InputSet._set_input(self, 'SortOrder', value)
"""
A ResultSet with methods tailored to the values returned by the RandomWords choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class RandomWordsResultSet(ResultSet):
"""
Retrieve the value for the "Response" output from this choreography execution. (The response from Wordnik.)
"""
def get_Response(self):
return self._output.get('Response', None)
class RandomWordsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return RandomWordsResultSet(response, path)
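# Illustrative usage sketch; the session constructor arguments and the
# execute_with_results()/get_Response() calls are assumed from the Temboo SDK:
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = RandomWords(session)
#   choreo_inputs = choreo.new_input_set()
#   choreo_inputs.set_APIKey('YOUR_WORDNIK_API_KEY')
#   results = choreo.execute_with_results(choreo_inputs)
#   print(results.get_Response())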
| [
"[email protected]"
]
| |
2fec48c9bc4689497d616c4fab84161b86faed78 | ef61c5f177ee44ac08325335fc28a12f3fccbb58 | /resource_management/tests/interactors/test_create_resource.py | 2c7bedc0881b90bbee655ebca9a3137cfff9d3c2 | []
| no_license | bammidichandini/resource_management-chandini | 3c11c7b2eb5e2f8d3df5b55e4d3ee86a27ed5c3a | aa4ec50f0b36a818bebc2033cb39ee928e5be13c | refs/heads/master | 2022-12-01T19:59:25.366843 | 2020-07-23T09:10:42 | 2020-07-23T09:10:42 | 269,610,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,920 | py | import pytest
from unittest.mock import create_autospec
#from resource_management.exceptions.exceptions import UserCannotManipulateException
from django_swagger_utils.drf_server.exceptions import Forbidden
from resource_management.interactors.storages.resources_storage_interface import StorageInterface
from resource_management.interactors.presenters.presenter_interface import PresenterInterface
from resource_management.interactors.create_resources_interactor import CreateResourceInteractor
@pytest.mark.django_db()
def test_create_resource(resource_dtos):
#arrange
user_id = 1
storage = create_autospec(StorageInterface)
presenter = create_autospec(PresenterInterface)
storage.is_admin.return_value = True
interactor = CreateResourceInteractor(
storage=storage,
presenter=presenter
)
#act
interactor.create_resource_interactor(
resource_dtos,
user_id=user_id
)
#assert
storage.create_resource.assert_called_once_with(
resource_dtos,
user_id=user_id
)
storage.is_admin.assert_called_once_with(user_id)
def test_create_resource_with_user(resource_dtos):
#arrange
user_id = 1
storage = create_autospec(StorageInterface)
presenter = create_autospec(PresenterInterface)
storage.is_admin.return_value =False
presenter.raise_user_cannot_manipulate_exception.side_effect = \
Forbidden
interactor = CreateResourceInteractor(
storage=storage,
presenter=presenter
)
#act
with pytest.raises(Forbidden):
interactor.create_resource_interactor(
resource_dtos,
user_id=user_id
)
# #assert
# storage.create_resource.assert_called_once_with(
# resource_dtos,
# user_id=user_id
# )
# presenter.raise_user_cannot_manipulate_exception.assert_called_once()
| [
"[email protected]"
]
| |
a3078ed764f5a832bc8be4f0ed37f7616cbcaff7 | a52b63889017a7f099a2575abf251bfadc672349 | /main.py | 40f0b0bd54fb73bd86e5354c476a419e1fd0ed16 | []
| no_license | 602p/siscalc | cda58454a10780cceb018d0405377dbd49444911 | 0c82d1e1acdb0c1c99a09a7d02f47367ef036989 | HEAD | 2016-09-01T15:45:22.996359 | 2016-03-19T06:18:09 | 2016-03-19T06:18:09 | 54,244,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,419 | py | import browser
class Category(object):
def __init__(self, *a, **k):
self.container=browser.html.DIV()
self.assignments_container=browser.html.DIV()
self.percent=browser.html.INPUT(readonly=True)
self.weight=browser.html.INPUT(type="number")
		self.add=browser.html.BUTTON("Add Assignment")
self.delete_this=browser.html.BUTTON("X")
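		# In Brython, '<=' on a DOM element appends the right-hand operand as a child node.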
self.container<=browser.html.INPUT(value="Category")
self.container<=" Weight:"
self.container<=self.weight
self.container<=" "
self.container<=self.percent
self.container<=self.add
self.container<=" "
self.container<=self.delete_this
self.container<=browser.html.BR()
self.container<=self.assignments_container
browser.document.getElementById("categories_container")<=self.container
self.add.bind("click", self.add_assignment)
self.delete_this.bind("click", self.remove_this)
self.weight.bind("input", self.update)
categories.append(self)
self.assignments=[]
def register_assignement(self, assignment):
self.assignments.append(assignment)
def update(self, *a, **k):
sum_score=sum([float(a.score.value) for a in self.assignments])
sum_max=sum([float(a.max.value) for a in self.assignments])
self._sum_max=sum_max
self._percent=(sum_score/sum_max)*100
self.percent.value=str(self._percent)+"%"
_update_class()
def _delete(self, a):
self.assignments.remove(a)
self.update()
def remove_this(self, *a, **k):
self.container.clear()
categories.remove(self)
def add_assignment(self, *a, **k):
Assignment(self)
class Assignment(object):
def __init__(self, parent):
self.container=browser.html.DIV("-->")
self.score=browser.html.INPUT(type="number")
self.max=browser.html.INPUT(type="number")
self.percent=browser.html.INPUT(readonly=True)
self.remove=browser.html.BUTTON("X")
self.as_pct=browser.html.BUTTON("%")
self.container<=browser.html.INPUT(value="Assignment")
self.container<=":"
self.container<=self.score
self.container<="/"
self.container<=self.max
self.container<=self.percent
self.container<=self.remove
self.container<=self.as_pct
self.container<=browser.html.BR()
self.parent=parent
self.parent.assignments_container<=self.container
self.score.bind("input", self.update)
self.max.bind("input", self.update)
self.remove.bind("click", self.delete)
self.as_pct.bind("click", self.alert_as_pct)
self.parent.register_assignement(self)
def alert_as_pct(self, *a, **k):
browser.alert("This assignement is "+str((float(self.max.value)/self.parent._sum_max)*100*float(self.parent.weight.value))+"% of your overall grade")
def update(self, *a, **k):
self.percent.value=str((float(self.score.value)/float(self.max.value))*100)+"%"
self.parent.update()
def delete(self, *a, **k):
self.container.clear()
self.parent._delete(self)
categories=[]
browser.document["add_category"].bind("click", Category)
def _update_class():
browser.document["class_pct"].value=str(sum([float(c.weight.value)*c._percent for c in categories]))+"%"
| [
"[email protected]"
]
| |
9006a9071155da1ddc1db98dd44bc61448f755f0 | 7374204324f6326663d12b3dd1fecc5bebb6854e | /algorithm-pattern/binary_op/136.py | 8443de71e568342720ea162d84e376931fc2706a | []
| no_license | KevinChen1994/leetcode-algorithm | c18b58df398027078b0c0f468c4c873e9419433b | 1bcf3206cd3acc428ec690cb883c612aaf708aac | refs/heads/master | 2023-02-07T11:35:47.747207 | 2023-01-29T11:08:49 | 2023-01-29T11:08:49 | 230,386,123 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | # !usr/bin/env python
# -*- coding:utf-8 _*-
# author:chenmeng
# datetime:2020/7/17 13:51
'''
solution: use the XOR operation. XOR of two equal numbers is 0 and differing bits give 1; because XOR is commutative and associative, e.g. 1^2^2 = 2^2^1 = 0^1 = 1 (0 XOR any number is that number itself), every paired element cancels out and only the number that appears once remains.
'''
class Solution:
def singleNumber(self, nums):
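        # XOR all elements: values that appear twice cancel to 0, leaving the number that appears only once.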
result = nums[0]
for num in nums[1:]:
result = result ^ num
return result
if __name__ == '__main__':
solution = Solution()
nums = [3, 2, 2]
print(solution.singleNumber(nums))
| [
"[email protected]"
]
| |
cf253b4a79d908574d1f8ec1b551a5b8cb92373c | 1825283527f5a479204708feeaf55f4ab6d1290b | /leetcode/python/546/original/546.remove-boxes.0.py | 4414c0fc7ebe8b59905309e520e365b774e2b427 | []
| no_license | frankieliu/problems | b82c61d3328ffcc1da2cbc95712563355f5d44b5 | 911c6622448a4be041834bcab25051dd0f9209b2 | refs/heads/master | 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 | HTML | UTF-8 | Python | false | false | 1,077 | py | #
# @lc app=leetcode id=546 lang=python3
#
# [546] Remove Boxes
#
# https://leetcode.com/problems/remove-boxes/description/
#
# algorithms
# Hard (37.29%)
# Total Accepted: 8.4K
# Total Submissions: 22.4K
# Testcase Example: '[1,3,2,2,2,3,4,3,1]'
#
# Given several boxes with different colors represented by different positive
# numbers.
# You may experience several rounds to remove boxes until there is no box left.
# Each time you can choose some continuous boxes with the same color (composed
# of k boxes, k >= 1), remove them and get k*k points.
# Find the maximum points you can get.
#
#
# Example 1:
# Input:
#
# [1, 3, 2, 2, 2, 3, 4, 3, 1]
#
# Output:
#
# 23
#
# Explanation:
#
# [1, 3, 2, 2, 2, 3, 4, 3, 1]
# ----> [1, 3, 3, 4, 3, 1] (3*3=9 points)
# ----> [1, 3, 3, 3, 1] (1*1=1 points)
# ----> [1, 1] (3*3=9 points)
# ----> [] (2*2=4 points)
#
#
#
# Note:
# The number of boxes n would not exceed 100.
#
#
#
class Solution:
    def removeBoxes(self, boxes):
        """
        :type boxes: List[int]
        :rtype: int
        """
        # Interval DP: best(i, j, k) is the maximum score for boxes[i..j] when k
        # extra boxes equal to boxes[i] are already attached to its left side.
        from functools import lru_cache
        @lru_cache(maxsize=None)
        def best(i, j, k):
            if i > j:
                return 0
            # Option 1: remove boxes[i] together with its k attached copies now.
            res = (k + 1) * (k + 1) + best(i + 1, j, 0)
            # Option 2: clear boxes[i+1..m-1] first, then merge boxes[i] with a
            # later box of the same color at position m.
            for m in range(i + 1, j + 1):
                if boxes[m] == boxes[i]:
                    res = max(res, best(i + 1, m - 1, 0) + best(m, j, k + 1))
            return res
        return best(0, len(boxes) - 1, 0) if boxes else 0
| [
"[email protected]"
]
| |
cf3ff688eca559e72b7a03c95a74663b58dfad04 | 83a506a501561602ad3b259341225ddfbddab160 | /GameServer/socket_server/server/factorys/tornadoFactory.py | 287f7c09b26205ca9792a3d77952117fda7e9526 | []
| no_license | daxingyou/SouYouJi_Game | 9dc5f02eb28b910efb229653a8d0bffe425a7911 | 7311a994c9aba15b7234331709975ebc37e8453d | refs/heads/master | 2023-03-28T01:36:48.955107 | 2020-04-05T01:24:17 | 2020-04-05T01:24:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,412 | py | # -*- coding:utf-8 -*-
# !/bin/python
"""
Author: Winslen
Date: 2019/10/15
Revision: 1.0.0
Description: Base factory classes for Tornado-based socket servers (startup, heartbeat, queued order jobs, graceful shutdown)
"""
import traceback
# time and logging are used below; they may also be re-exported by the wildcard imports further down
import time
import logging
import tornado
import tornado.httpserver
import tornado.ioloop
import tornado.log
import tornado.options
import tornado.web
from typing import *
from define.define_redis_key import *
from define.define_consts import *
from public.public_logger import *
class baseFactory(object):
def __init__(self, address='127.0.0.1', port=9797, debug=False, *args, **kwargs):
self.address = address
self.port = port
self.serverTag = '%s:%s' % (self.address, self.port)
self.debug = debug
self.runtime = int(time.time())
self.OrderJob = {}
self._logger_ = None
self.curServerStage = ServerStage.none
self.setServerOrderJob()
def getLogger(self):
logger = getHandlerLogger(fileLabel='%s_%s_server' % (self.address, self.port), loggerLabel='server',
level=logging.DEBUG, handler_types=[Handler_Class.HourFile], when='H')
logger.setLevel(logging.DEBUG)
return logger
def log(self, msg='', level='info'):
if not self._logger_:
self._logger_ = self.getLogger()
try:
if level in ['warn', 'warning']:
self._logger_.warning(msg)
elif level == 'error':
self._logger_.error(msg)
else:
self._logger_.info(msg)
except:
traceback.print_exc()
print(msg)
def setServerOrderJob(self):
self.OrderJob['closeServer'] = self.closeServer
def closeServer(self, waitSecond: int = 60, *args, **kwargs):
waitSecond = int(waitSecond)
if self.curServerStage == ServerStage.readyClose:
return
self.curServerStage = ServerStage.readyClose
self.log('服务器[%s]正在关闭,将在[%s]秒后关闭' % (self.serverTag, waitSecond))
self.add_timeLater_callFunc(delay=waitSecond, callback=self.doCloseServer)
def doCloseServer(self, *args, **kwargs):
self.curServerStage = ServerStage.Closed
tornado.ioloop.IOLoop.current().stop()
self.log('服务器[%s]已经关闭' % (self.serverTag))
def onHeartbeat(self):
timeStamp = int(time.time() * 1000)
try:
self.onTick(timeStamp)
except:
traceback.print_exc()
def onTick(self, timeStamp):
self.checkOrderJobs()
def checkOrderJobs(self):
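        # Each pending order is a string of the form "jobName|arg1|arg2|...", e.g. "closeServer|60"; unknown job names are skipped.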
orderServices = self.getOrderServices()
for _order in orderServices:
_orderArgs = _order.split('|')
jobKey = _orderArgs.pop(0)
jobFunc = self.OrderJob.get(jobKey, None)
if jobFunc:
self.doOrderJobs_before(jobFunc, _orderArgs, _order)
doResult, err = self.doOrderJobs_doing(jobFunc, _orderArgs, _order)
if doResult:
self.doOrderJobs_afterSuc(jobFunc, _orderArgs, _order)
else:
self.doOrderJobs_afterFaild(jobFunc, _orderArgs, _order, err)
def getOrderServices(self):
return []
def notFoundOrderJob(self, jobKey, orderArgs):
self.log('[notFoundOrderJob] 未知任务[%s]=> %s' % (jobKey, orderArgs))
def doOrderJobs_before(self, jobFunc, orderArgs, order):
pass
def doOrderJobs_doing(self, jobFunc, orderArgs, order):
self.log('将要执行[%s]' % (order))
try:
jobFunc(*orderArgs)
except Exception as err:
traceback.print_exc()
self.log('[ERROR][doOrderJobs_doing]执行[%s]失败' % (order), level='error')
return False, err
else:
return True, ''
def doOrderJobs_afterSuc(self, job, _orderArgs, _order):
pass
def doOrderJobs_afterFaild(self, job, _orderArgs, _order, err=''):
pass
def add_timeLater_callFunc(self, delay: float = 0, callback=None, **kwargs):
if not callback:
return
tornado.ioloop.IOLoop.current().call_later(delay=delay, callback=callback, **kwargs)
def add_callAt_callFunc(self, when: float, callback=None, **kwargs):
if not callback:
return
return tornado.ioloop.IOLoop.current().call_at(when=when, callback=callback, **kwargs)
def add_PeriodicCallback(self, callback: Callable, callback_time: float, rightAwayDo: bool = False,
jitter: float = 0):
if rightAwayDo:
callback()
periodicClass = tornado.ioloop.PeriodicCallback(callback, callback_time, jitter=jitter)
periodicClass.start()
return periodicClass
class TornadoFactory(baseFactory):
def __init__(self, *args, **kwargs):
super(TornadoFactory, self).__init__(*args, **kwargs)
self.httpServer = None
def getAppRouterHandler(self):
return []
def getApplicationConfigs(self):
return dict(
static_path=os.path.join(os.path.dirname(__file__), "..\\static"),
template_path=os.path.join(os.path.dirname(__file__), "..\\template"),
debug=self.debug,
compiled_template_cache=False
)
def initApplication(self):
app = tornado.web.Application(self.getAppRouterHandler(), **self.getApplicationConfigs())
app.factory = self
return app
def doBeforeServerStart(self):
self.curServerStage = ServerStage.readyStart
def doAfterServerStart(self):
self.curServerStage = ServerStage.doing
def run_server(self):
self.log('服务器[%s]正在启动' % (self.serverTag))
self.doBeforeServerStart()
app = self.initApplication()
self.httpServer = tornado.httpserver.HTTPServer(app)
self.httpServer.listen(self.port, '0.0.0.0')
self.httpServer.address = self.address
self.httpServer.port = self.port
self.httpServer.factory = self
tornado.ioloop.PeriodicCallback(self.onHeartbeat, 3000).start()
self.log('服务器[%s]已启动' % (self.serverTag))
self.doAfterServerStart()
tornado.ioloop.IOLoop.current().start()
| [
"[email protected]"
]
| |
ac7db187efbe323fedbbcc02c5b41ba4416264b0 | 57ec8eff01275cdae43243900a422b87836b807b | /arithmetic.py | bc77393ea397e581d0238d878fc231f7fe59759a | []
| no_license | Ihyatt/calculator-1 | 15624b997c1c19a6bf30ac422f55669838ea7115 | 43f5d63eb81398aff3cf10e3d2d77c4296bdddc8 | refs/heads/master | 2021-01-10T16:29:56.063120 | 2016-04-06T19:54:49 | 2016-04-06T19:54:49 | 55,635,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | def add(num1, num2):
""" This is function will add two numbers """
return num1 + num2
def subtract(num1, num2):
""" This function will subtract two numbers """
return num1 - num2
def multiply(num1, num2):
""" This function will multiply two numbers """
return num1 * num2
def divide(num1, num2):
    """ This function will divide two numbers """
return float(num1) / num2
def square(num1):
"""This function will square a number"""
return num1 ** 2
def cube(num1):
""" This function will cube a number """
return num1 ** 3
def power(num1, num2):
""" This function will return the power of one number by the second number """
return num1 ** num2
def mod(num1, num2):
    """ This function will return the remainder when the first number is divided by the second """
return num1 % num2
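# Minimal usage sketch (added for illustration; not part of the original module):
if __name__ == "__main__":
    print(add(2, 3))     # 5
    print(divide(7, 2))  # 3.5
    print(power(2, 10))  # 1024
    print(mod(17, 5))    # 2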
| [
"[email protected]"
]
| |
2cf45b3d2a4735668005b263d62b610abb28794a | d897c2bc4ba9a84e7e8a2fe3e998d78cd116f920 | /max_seq/gen/generatore | 64aabc5dc9d2141313929567a4aedb167fe56ad7 | []
| no_license | romeorizzi/problemsCMS_for_LaboProg | 8907622744bc89752391024f24025a7e9706501b | 027b1b204efe602461e93d8b1c194a09eb6526cd | refs/heads/master | 2020-04-03T17:33:52.384915 | 2020-03-25T07:10:17 | 2020-03-25T07:10:17 | 155,449,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | #!/usr/bin/env python2
from limiti import *
from varie import *
from sys import argv, exit, stderr
import os
from numpy.random import random, randint, seed as nseed
from random import choice, sample, shuffle, seed as rseed
usage="""Generator for "prova".
Parameters:
* N (number)
* S (seed)
Constraint:
* 1 <= N <= %d
""" % (MAXN)
def run(N):
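    # Print N, then N random integers in the range [-70, 99) separated by spaces.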
print N
print " ".join(map(str, [randint(-70, 99) for i in xrange(0, N)]))
if __name__ == "__main__":
if len(argv) != 3:
print usage
exit(1)
N, S = map(int, argv[1:])
assert (1 <= N <= MAXN)
    # for a non-positive seed, copy one of the sample inputs from the .tex file
if S <= 0:
print extract_input()[-S],
exit(0)
nseed(S)
rseed(S)
run(N)
| [
"[email protected]"
]
| ||
014cc680e5166aca9a5e8d59a1954d04e1b5f948 | 175e4e031471e5cdbc9bcaee2df10f5ec44871d3 | /LESSON2b/.history/backend/app_20200608192133.py | 7e3f3287df3fda92cff8bcebf4818814b899ee30 | []
| no_license | hiyacins/uma_study | c329d29a9c3899ab4feca21b9c47ef546b69b0bd | 067e66f258a0c89f7670c645dd7c40feee8536fa | refs/heads/master | 2023-01-23T06:40:12.435047 | 2020-06-17T15:59:34 | 2020-06-17T15:59:34 | 239,077,726 | 0 | 0 | null | 2023-01-06T08:36:26 | 2020-02-08T05:56:52 | Python | UTF-8 | Python | false | false | 16,842 | py | # from flask import Flask, render_template
from HiyaLib import *
# Base class for the classes that represent DB tables.
class DBTable():
    # Takes elements one by one from columns and column_names as value and name,
    # and calls setattr(object, name, value) on a newly created DBTable-derived object.
    # columns: holds the column values.
    # column_names: holds the column names.
    # Return value: the record object with each name attribute set to its value.
    # (Usage example)
    # t.from_tuple(self.mysql_cursor.fetchone(), t.sql_select_statement.split(","))
    # (Reason this exists)
    @classmethod
    def from_tuple(cls, columns: Tuple[Tuple], column_names: List[str]) -> "DBTable":
        # Create an instance of the DBTable-derived class.
        record = (cls)()
        # Take elements one by one from column_names and columns into name and value,
        # and set the name attribute of the record object to value for each pair.
        for name, value in zip(column_names, columns):
            setattr(record, name, value)
        return record
    # Converts a Tuple[tuple] value into a List[map]-style value.
    # columns: a Tuple[tuple] value, e.g. ((1,'abc'),(2,'def')).
    # Return value: the Tuple[tuple] converted to a List[map].
    # (Usage example)
    # t.from_tuple_of_tuples(self.mysql_cursor.fetchall())
    # (Reason this exists)
    @classmethod
    def from_tuple_of_tuples(cls, columns: Tuple[tuple]) -> "List[map]":
        t = cls.sql_select_statement.split(",")
        return list(map(lambda x: cls.from_tuple(x, t), columns))
# Structure representing a single record of the TODO_ITEMS table in the DB.
class ToDoItem(DBTable):
    # Name of the TODO_ITEMS table.
    table_name = "todo_items"
    # Field names of the TODO_ITEMS table.
    sql_select_statement = "id,comment"
    # Primary key setting of the TODO_ITEMS table [ToDo: make this a list].
    orm_primary_key = "id"
    # Columns updated on the TODO_ITEMS table.
    orm_update_str = "comment=?"
    # Placeholder values for INSERTs into the TODO_ITEMS table.
    orm_insert_value = "?"
    # Column names used for INSERTs into the TODO_ITEMS table.
    orm_insert_colum = "comment"
    # Column names of the TODO_ITEMS table as a list.
    orm_column_names = ["id", "comment"]
    def __init__(self):
        # auto_increment , primary
        self.id = -1
        # Holds the body text of the ToDo entry.
        self.comment = ""
# Structure representing a single record of the SITE_USERS table in the DB.
class SiteUser(DBTable):
    # Name of the SITE_USERS table.
    table_name = "site_users"
    # Field names of the SITE_USERS table.
    sql_select_statement = "id,id_name,password"
    # Primary key setting of the SITE_USERS table [ToDo: make this a list].
    orm_primary_key = "id"
    # Columns updated on the SITE_USERS table.
    orm_update_str = "id_name=?,password=?"
    # Placeholder values for INSERTs into the SITE_USERS table.
    orm_insert_value = "?,?"
    # Column names used for INSERTs into the SITE_USERS table.
    orm_insert_colum = "id_name, password"
    # Column names of the SITE_USERS table as a list.
    orm_column_names = ["id", "id_name", "password"]
    def __init__(self):
        # auto_increment , primary
        self.id = -1
        # Holds the user ID.
        self.id_name = ""
        # Holds the password.
        self.password = ""
# Table used for unit tests.
# Structure representing a single record of TEST_TABLE in the DB.
class TestTable(DBTable):
    # Name of TEST_TABLE.
    table_name = "test_tables"
    # Field names of TEST_TABLE.
    sql_select_statement = "id,comment"
    # Primary key setting of TEST_TABLE.
    orm_primary_key = "id"
    def __init__(self):
        # auto_increment , primary
        self.id = -1
        # Holds the test comment text.
        self.comment = ""
# Table used for unit tests.
# Structure representing a single record of the TEST_TABLE2 table in the DB.
class TestTable2(DBTable):
    # Name of TEST_TABLE.
    table_name = "test_tables2"
    # Field names of TEST_TABLE.
    sql_select_statement = "id,id_name,password"
    # Primary key setting of TEST_TABLE.
    orm_primary_key = "id"
    def __init__(self):
        # auto_increment , primary
        self.id = -1
        # Holds the user ID.
        self.id_name = ""
        # Holds the password.
        self.password = ""
# Class that opens and closes connections to MySQL.
class MySQLConnector:
    def __init__(self):
        # MySQL connection object.
        self.__mysql_connection = None
        # MySQL cursor.
        self.mysql_cursor = None
    def __enter__(self):
        # Read the DB connection config from a JSON file and connect.
        self.connect(ReadJsonFromFile('exclude/connect_config.json'))
        return self
    def __exit__(self, ex_type, ex_value, tb):
        self.disconnect()
    # Pass the connection information as a dict.
    # config: DB connection information.
    # Example
    # config = {
    #     'user': 'root',
    #     'password': '****',
    #     'host': 'localhost',
    #     'port': 3306,
    #     'charset': 'utf8',
    #     'database': 'tables'
    # }
    def connect(self, connect_config: dict):
        # Avoid opening a second connection on top of an existing one.
        self.disconnect()
        # Connect to the SQL server.
        self.__mysql_connection = mysql.connector.connect(**connect_config)
        # MySQLConnector requires commit() after data-modifying SQL by default, so switch autocommit on.
        self.__mysql_connection.autocommit = True
        # Obtain the cursor.
        self.mysql_cursor = self.__mysql_connection.cursor(prepared=True)
    # Disconnect from the DB.
    def disconnect(self):
        # Close the MySQL cursor.
        if self.mysql_cursor is not None:
            self.mysql_cursor.close()
            self.mysql_cursor = None
        # Close the MySQL connection.
        if self.__mysql_connection is not None:
            self.__mysql_connection.close()
            self.__mysql_connection = None
    # Executes SQL.
    # sql: the SQL statement to execute.
    # (Example) "SELECT id FROM site_users WHERE id = ?"
    # param: a tuple holding the values that fill the "?" placeholders of the SQL passed as sql.
    # When param keeps its default value, the second argument is omitted.
    # (Example 1) db.execute("SELECT id FROM site_users WHERE id = ?", id)
    # (Example 2) db.execute("SELECT id FROM site_users")
    def execute(self, sql: str, param=()):
        # When param is a plain str, int, etc. instead of a tuple, wrap it in a tuple
        # (i.e. build a tuple whose first element is param).
        if not ((type(param) is tuple) or (type(param) is list) or (type(param) is dict)):
            param = (param,)
        return self.mysql_cursor.execute(sql, param)
    # Executes the SQL and returns the fetchall() result as a List[DBTable].
    # t: the record-structure class of the table to read from.
    # sql_where: only the condition part of the SQL; defaults to "".
    # (Example) "WHERE id = ?"
    # param: a tuple holding the values that fill the "?" placeholders of the SQL passed as sql.
    # Return value: a List[DBTable].
    # (Usage example)
    # entries = db.select(Entry)
    def select(self, t: type, sql_where: str = "", param=()) -> "List[DBTable]":
        self.execute(power_join(
            [f"SELECT {t.sql_select_statement} FROM {t.table_name}", sql_where]), param)
        return t.from_tuple_of_tuples(self.mysql_cursor.fetchall())
    # Executes the SQL and returns the fetchone() result as a tuple.
    # t: the record-structure class of the table to read from.
    # sql_where: only the condition part of the SQL; defaults to "".
    # param: a tuple holding the values that fill the "?" placeholders of the SQL passed as sql.
    # Return value: a tuple.
    # (Usage example)
    # result = db.select_one(SiteUser,"WHERE id = ?", id)
    def select_one(self, t: type, sql_where: str = "", param=()) -> tuple:
        self.execute(power_join(
            [f"SELECT {t.sql_select_statement} FROM {t.table_name}", sql_where]), param)
        return t.from_tuple(self.mysql_cursor.fetchone(), t.sql_select_statement.split(","))
    # [ToDo]: write tests.
    # Returns the id assigned by the most recent insert, for insert -> update flows.
    # param: defaults to an empty tuple.
    # Return value: a tuple.
    # (Usage example)
    # id = db.select_ex()
    def select_ex(self, param=()) -> tuple:
        self.execute("SELECT LAST_INSERT_ID()", param)
        return self.mysql_cursor.fetchone()
    # Executes a DELETE.
    # t: either the record-structure class of the table (to delete every row),
    #    or a DBTable instance (to delete that single record).
    # (Usage example)
    # * with a DBTable instance:
    # db.delete(todo_item)
    # * with the ToDoItem class itself:
    # db.delete(ToDoItem)
    # (Reason this exists) Written to support both deleting a single ToDo entry and deleting all of them.
    # Single deletion takes a DBTable instance, while delete-all takes the ToDoItem class itself.
    # Python does not check argument types at compile time, so both cases are handled inside the function.
    def delete(self, t):
        # When t is a class (type), delete every record in the table.
        if type(t) is type:
            return self.execute(f"DELETE FROM {t.table_name}", param=())
        primary_key = t.orm_primary_key
        return self.execute(power_join([f"DELETE FROM {t.table_name}", f"WHERE {primary_key} = ?"]), getattr(t, primary_key))
    # Executes an UPDATE.
    # t: a DBTable instance.
    # (Usage example)
    # items = select_one(ToDoItem, ...)
    # items.comment='国語'
    # db.update(items)
    def update(self, t: DBTable):
        # Get the primary key.
        primary_key = t.orm_primary_key
        # Get the columns to update.
        update_statements = t.sql_select_statement
        update_param = []
        update_str = ""
        # The column list is a str, so split it into a list and walk each member_name.
        for member_name in update_statements.split(','):
            # The primary key itself is not updated.
            if member_name != primary_key:
                # Take the column to update and append "=?," to it.
                update_str += member_name + "=?,"
                # Collect the value of the column to update.
                update_param.append(getattr(t, member_name))
        # Finally append the primary key value.
        update_param.append(getattr(t, primary_key))
        sql = [
            f"UPDATE {t.table_name} SET {update_str.rstrip(',')} WHERE {primary_key} = ?"]
        return self.execute(power_join(sql), update_param)
    # Executes an INSERT.
    # t: a DBTable instance.
    # (Usage example)
    # item = ToDoItem()
    # item.id_name = 'hiya'
    # item.password = 'uma3141592'
    # db.insert(item)
    def insert(self, t: DBTable):
        # Get the primary key.
        primary_key = t.orm_primary_key
        # Get the columns to insert.
        insert_statements = t.sql_select_statement
        insert_param = []
        insert_str = ''
        insert_value = ''
        # The column list is a str, so split it into a list and walk each member_name.
        for member_name in insert_statements.split(','):
            # The primary key itself is not inserted.
            if member_name != primary_key:
                # Collect the column name to insert.
                insert_str += member_name + ","
                # Add a "?" placeholder for the column.
                insert_value += "?,"
                # Collect the value of the column.
                insert_param.append(getattr(t, member_name))
        sql = [
            f"INSERT INTO {t.table_name} ({insert_str.rstrip(',')}) VALUES ({insert_value.rstrip(',')})"]
        return self.execute(power_join(sql), insert_param)
app = FlaskBuilder(__name__)
# Registers a comment added on the ToDo list into the DB.
@app.route('/add', methods=['POST'])
@login_required
def add_todo_item():
    todoitem = ToDoItem()
    # Get the text entered in the ToDo form's text box.
    todoitem.comment = request.json['comment']
    # If the comment text box is not empty, run the SQL;
    # if it is empty, do nothing.
    if todoitem.comment:
        with MySQLConnector() as db:
            # Register the comment in the DB.
            db.insert(todoitem)
    return redirect(url_for('top'))
# Deletes a single comment of the ToDo list from the DB.
# id : int
#     id of the comment to delete.
@app.route('/delete/<int:id>', methods=['POST'])
@login_required
def delete_todo_item(id: int):
    with MySQLConnector() as db:
        todo_item = db.select_one(
            ToDoItem, "WHERE id = ?", id) if id else None
        db.delete(todo_item)
    flash('削除しました\(^o^)/')  # "Deleted it!"
    return redirect(url_for('top'))
# Deletes every ToDo entry in the DB.
@app.route('/all-delete', methods=['POST'])
@login_required
def all_delete_todo_items():
    with MySQLConnector() as db:
        # Delete all ToDo entries.
        db.delete(ToDoItem)
    flash('全部削除しました\(^o^)/オワッタ')  # "Deleted everything!"
    return redirect(url_for('top'))
# Page shown after a successful login (home page).
@app.route('/')
@login_required
def top():
    flash('ログインを成功しました\(^o^)/')  # "Login succeeded!"
    with MySQLConnector() as db:
        print("きたよ")  # debug: "got here"
        entries = db.select(ToDoItem)
        return json.dumps(entries, indent=4)
        # print("DQ:", entries.id)
        # # json_entries = json.dumps(entries)
        # # print("json:", json_entries)
        # return jsonify(json_entries)
    return render_template('index.html')
# Shows the pre-login page.
@app.route('/login', methods=['GET'])
def login_view():
    # Render the login page.
    return render_template('index.html')
# Login handling.
@app.route('/login', methods=['POST'])
def login():
    with MySQLConnector() as db:
        # Get the user ID and password entered in the login form.
        id_name = request.json['id_name']
        password = request.json['password']
        # Using the user ID from the login form as the parameter, call select_one with
        # the DB table class, fetchone the row and extract its values.
        # If the user ID from the login form is empty, use None instead.
        site_user = db.select_one(
            SiteUser, "WHERE id_name = ?", id_name) if id_name else None
        # If the user ID exists in the DB and the password from the form matches the stored one,
        # register the session.
        LoginOk = site_user is not None and check_password_hash(
            site_user.password, password)
        app.login(LoginOk)
        if not LoginOk:
            flash('ログイン失敗:ユーザーIDもしくはパスワードが正しくありません。')  # "Login failed: the user ID or password is incorrect."
        # If the login succeeded, redirect to the post-login page;
        # if it failed, redirect back to the login page (show it again).
        # return redirect(url_for('top' if LoginOk else 'index'))
        return jsonify(LoginOk)
if __name__ == "__main__":
    app.run(port=5000, debug=True)
| [
"[email protected]"
]
| |
f2f2a7316f41e31019494e9057e0c5e91b6b7285 | 117f066c80f3863ebef74463292bca6444f9758a | /api/4min/search/python-flask-server/swagger_server/test/test_search_controller.py | 81b88eb6a8b94cfdb820b62714ac0fb1f63005a0 | []
| no_license | cottrell/notebooks | c6de3842cbaeb71457d270cbe6fabc8695a6ee1b | 9eaf3d0500067fccb294d064ab78d7aaa03e8b4d | refs/heads/master | 2023-08-09T22:41:01.996938 | 2023-08-04T22:41:51 | 2023-08-04T22:41:51 | 26,830,272 | 3 | 1 | null | 2023-03-04T03:58:03 | 2014-11-18T21:14:23 | Python | UTF-8 | Python | false | false | 1,402 | py | # coding: utf-8
from __future__ import absolute_import
from swagger_server.models.search_request import SearchRequest
from swagger_server.models.search_response import SearchResponse
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestSearchController(BaseTestCase):
""" SearchController integration test stubs """
def test_search_items_to_client(self):
"""
Test case for search_items_to_client
todo
"""
body = SearchResponse()
response = self.client.open('/v0/search/items_to_client',
method='GET',
data=json.dumps(body),
content_type='application/json')
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_search_items_to_items(self):
"""
Test case for search_items_to_items
todo
"""
body = SearchRequest()
response = self.client.open('/v0/search/items_to_items',
method='GET',
data=json.dumps(body),
content_type='application/json')
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
| [
"[email protected]"
]
| |
cb4bee84110b580c390815e435c430ed3a233fd9 | d5f75adf5603927396bdecf3e4afae292143ddf9 | /python/paddle/fluid/tests/unittests/test_norm_all.py | c65bff3a7bb397ea896da928581078f9b6a1e6c5 | [
"Apache-2.0"
]
| permissive | jiweibo/Paddle | 8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4 | 605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74 | refs/heads/develop | 2023-07-21T03:36:05.367977 | 2022-06-24T02:31:11 | 2022-06-24T02:31:11 | 196,316,126 | 3 | 2 | Apache-2.0 | 2023-04-04T02:42:53 | 2019-07-11T03:51:12 | Python | UTF-8 | Python | false | false | 21,615 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
# hack method for test p_norm final state
def p_norm_python_api(x,
p=2.0,
axis=-1,
epsilon=1e-12,
keepdim=False,
as_vector=False):
if in_dygraph_mode():
return _C_ops.final_state_p_norm(x, p, axis, epsilon, keepdim,
as_vector)
if _in_legacy_dygraph():
return _C_ops.p_norm(x, 'axis', axis, 'porder', float(p), 'keepdim',
keepdim, 'epsilon', epsilon, 'as_vector',
as_vector)
def p_norm(x, axis, porder, keepdims=False, reduce_all=False):
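    # NumPy reference implementation of the p-norm; the tests below compare the operator output against it.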
r = []
if axis is None or reduce_all:
x = x.flatten()
if porder == np.inf:
r = np.amax(np.abs(x), keepdims=keepdims)
elif porder == -np.inf:
r = np.amin(np.abs(x), keepdims=keepdims)
else:
r = np.linalg.norm(x, ord=porder, keepdims=keepdims)
    elif isinstance(axis, (list, tuple)) and len(axis) == 2:
if porder == np.inf:
axis = tuple(axis)
r = np.amax(np.abs(x), axis=axis, keepdims=keepdims)
elif porder == -np.inf:
axis = tuple(axis)
r = np.amin(np.abs(x), axis=axis, keepdims=keepdims)
elif porder == 0:
axis = tuple(axis)
r = x.astype(bool)
r = np.sum(r, axis, keepdims=keepdims)
elif porder == 1:
axis = tuple(axis)
r = np.sum(np.abs(x), axis, keepdims=keepdims)
else:
axis = tuple(axis)
xp = np.power(np.abs(x), porder)
s = np.sum(xp, axis=axis, keepdims=keepdims)
r = np.power(s, 1.0 / porder)
else:
if isinstance(axis, list):
axis = tuple(axis)
r = np.linalg.norm(x, ord=porder, axis=axis, keepdims=keepdims)
r = r.astype(x.dtype)
return r
def frobenius_norm(x, axis=None, keepdims=False):
if isinstance(axis, list): axis = tuple(axis)
if axis is None: x = x.reshape(1, x.size)
r = np.linalg.norm(x, ord='fro', axis=axis,
keepdims=keepdims).astype(x.dtype)
return r
def final_state_frobenius_norm(x, dim, keep_dim, reduce_all):
return paddle.linalg.norm(x, p='fro', axis=dim, keepdim=keep_dim)
class TestFrobeniusNormOp(OpTest):
def setUp(self):
self.python_api = final_state_frobenius_norm
self.op_type = "frobenius_norm"
self.init_test_case()
x = (np.random.random(self.shape) + 1.0).astype(self.dtype)
norm = frobenius_norm(x, self.axis, self.keepdim)
self.reduce_all = (len(self.axis) == len(self.shape))
self.inputs = {'X': x}
self.attrs = {
'dim': list(self.axis),
'keep_dim': self.keepdim,
'reduce_all': self.reduce_all
}
self.outputs = {'Out': norm}
def test_check_output(self):
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
def init_test_case(self):
self.shape = [2, 3, 4, 5]
self.axis = (1, 2)
self.keepdim = False
self.dtype = "float64"
class TestFrobeniusNormOp2(TestFrobeniusNormOp):
def init_test_case(self):
self.shape = [5, 5, 5]
self.axis = (0, 1)
self.keepdim = True
self.dtype = "float32"
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
class TestPnormOp(OpTest):
def setUp(self):
self.op_type = "p_norm"
self.python_api = p_norm_python_api
self.init_test_case()
x = (np.random.random(self.shape) + 0.5).astype(self.dtype)
norm = p_norm(x, self.axis, self.porder, self.keepdim, self.asvector)
self.inputs = {'X': x}
self.attrs = {
'epsilon': self.epsilon,
'axis': self.axis,
'keepdim': self.keepdim,
'porder': float(self.porder),
'asvector': self.asvector
}
self.outputs = {'Out': norm}
self.gradient = self.calc_gradient()
def test_check_output(self):
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
def init_test_case(self):
self.shape = [2, 3, 4, 5]
self.axis = 1
self.epsilon = 1e-12
self.porder = 2.0
self.keepdim = False
self.dtype = "float64"
self.asvector = False
def calc_gradient(self):
self.attrs = {
'epsilon': self.epsilon,
'axis': self.axis,
'keepdim': self.keepdim,
'porder': float(self.porder),
'asvector': self.asvector
}
x = self.inputs["X"]
porder = self.attrs["porder"]
axis = self.attrs["axis"]
asvector = self.attrs["asvector"]
x_dtype = x.dtype
x = x.astype(np.float32) if x.dtype == np.float16 else x
if porder == 0:
grad = np.zeros(x.shape).astype(x.dtype)
elif porder in [float("inf"), float("-inf")]:
norm = p_norm(x,
axis=axis,
porder=porder,
keepdims=True,
reduce_all=asvector)
x_abs = np.abs(x)
grad = np.sign(x)
grad[x_abs != norm] = 0.0
else:
norm = p_norm(x,
axis=axis,
porder=porder,
keepdims=True,
reduce_all=asvector)
grad = np.power(norm, 1 - porder) * np.power(
np.abs(x), porder - 1) * np.sign(x)
numel = 1
for s in x.shape:
numel *= s
divisor = numel if asvector else x.shape[axis]
numel /= divisor
return [grad.astype(x_dtype) * 1 / numel]
class TestPnormOp2(TestPnormOp):
def init_test_case(self):
self.shape = [3, 20, 3]
self.axis = 2
self.epsilon = 1e-12
self.porder = 2.0
self.keepdim = True
self.dtype = "float32"
self.asvector = False
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestPnormOp3(TestPnormOp):
def init_test_case(self):
self.shape = [3, 20, 3]
self.axis = 2
self.epsilon = 1e-12
self.porder = np.inf
self.keepdim = True
self.dtype = "float32"
self.asvector = False
def test_check_grad(self):
self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
class TestPnormOp4(TestPnormOp):
def init_test_case(self):
self.shape = [3, 20, 3]
self.axis = 2
self.epsilon = 1e-12
self.porder = -np.inf
self.keepdim = True
self.dtype = "float32"
self.asvector = False
def test_check_grad(self):
self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
class TestPnormOp5(TestPnormOp):
def init_test_case(self):
self.shape = [3, 20, 3]
self.axis = 2
self.epsilon = 1e-12
self.porder = 0
self.keepdim = True
self.dtype = "float32"
self.asvector = False
def test_check_grad(self):
self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
class TestPnormOp6(TestPnormOp):
def init_test_case(self):
self.shape = [3, 20, 3]
self.axis = -1
self.epsilon = 1e-12
self.porder = 2
self.keepdim = False
self.dtype = "float32"
self.asvector = True
def test_check_grad(self):
self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestPnormOpFP16(TestPnormOp):
def init_test_case(self):
self.shape = [2, 3, 4, 5]
self.axis = 1
self.epsilon = 1e-12
self.porder = 2.0
self.keepdim = False
self.dtype = "float16"
self.asvector = False
def test_check_output(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_grad_with_place(place, ['X'],
'Out',
user_defined_grads=self.gradient)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestPnormOpFP161(TestPnormOpFP16):
def init_test_case(self):
self.shape = [2, 3, 4, 5]
self.axis = -1
self.epsilon = 1e-12
self.porder = 2.0
self.keepdim = False
self.dtype = "float16"
self.asvector = True
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestPnormBF16Op(OpTest):
def setUp(self):
self.op_type = "p_norm"
self.python_api = p_norm_python_api
self.init_test_case()
self.x = (np.random.random(self.shape) + 0.5).astype(np.float32)
self.norm = p_norm(self.x, self.axis, self.porder, self.keepdim,
self.asvector)
self.gradient = self.calc_gradient()
self.inputs = {'X': convert_float_to_uint16(self.x)}
self.attrs = {
'epsilon': self.epsilon,
'axis': self.axis,
'keepdim': self.keepdim,
'porder': float(self.porder),
'asvector': self.asvector
}
self.outputs = {'Out': convert_float_to_uint16(self.norm)}
def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=True)
def test_check_grad(self):
place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X'],
'Out',
user_defined_grads=self.gradient,
check_eager=True)
def init_test_case(self):
self.shape = [2, 3, 4, 5]
self.axis = 1
self.epsilon = 1e-12
self.porder = 2.0
self.keepdim = False
self.dtype = np.uint16
self.asvector = False
def calc_gradient(self):
self.attrs = {
'epsilon': self.epsilon,
'axis': self.axis,
'keepdim': self.keepdim,
'porder': float(self.porder),
'asvector': self.asvector
}
x = self.x
porder = self.attrs["porder"]
axis = self.attrs["axis"]
asvector = self.attrs["asvector"]
x_dtype = x.dtype
x = x.astype(np.float32) if x.dtype == np.float16 else x
if porder == 0:
grad = np.zeros(x.shape).astype(x.dtype)
elif porder in [float("inf"), float("-inf")]:
norm = p_norm(x,
axis=axis,
porder=porder,
keepdims=True,
reduce_all=asvector)
x_abs = np.abs(x)
grad = np.sign(x)
grad[x_abs != norm] = 0.0
else:
norm = p_norm(x,
axis=axis,
porder=porder,
keepdims=True,
reduce_all=asvector)
grad = np.power(norm, 1 - porder) * np.power(
np.abs(x), porder - 1) * np.sign(x)
numel = 1
for s in x.shape:
numel *= s
divisor = numel if asvector else x.shape[axis]
numel /= divisor
return [grad.astype(x_dtype) * 1 / numel]
def run_fro(self, p, axis, shape_x, dtype, keep_dim, check_dim=False):
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=shape_x, dtype=dtype)
out = paddle.norm(x=data, p=p, axis=axis, keepdim=keep_dim)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
np_input = (np.random.rand(*shape_x) + 1.0).astype(dtype)
expected_result = frobenius_norm(np_input, axis=axis, keepdims=keep_dim)
result, = exe.run(feed={"X": np_input}, fetch_list=[out])
self.assertEqual((np.abs(result - expected_result) < 1e-6).all(), True)
if keep_dim and check_dim:
self.assertEqual(
(np.abs(np.array(result.shape) - np.array(expected_result.shape)) <
1e-6).all(), True)
def run_pnorm(self, p, axis, shape_x, dtype, keep_dim, check_dim=False):
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=shape_x, dtype=dtype)
out = paddle.norm(x=data, p=p, axis=axis, keepdim=keep_dim)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
np_input = (np.random.rand(*shape_x) + 1.0).astype(dtype)
expected_result = p_norm(np_input,
porder=p,
axis=axis,
keepdims=keep_dim).astype(dtype)
result, = exe.run(feed={"X": np_input}, fetch_list=[out])
self.assertEqual((np.abs(result - expected_result) < 1e-6).all(), True)
if keep_dim and check_dim:
self.assertEqual(
(np.abs(np.array(result.shape) - np.array(expected_result.shape)) <
1e-6).all(), True)
def run_graph(self, p, axis, shape_x, dtype):
paddle.disable_static()
shape = [2, 3, 4]
np_input = np.arange(24).astype('float32') - 12
np_input = np_input.reshape(shape)
x = paddle.to_tensor(np_input)
#[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]]
# [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]]
out_pnorm = paddle.norm(x, p=2, axis=-1)
# compute frobenius norm along last two dimensions.
out_fro = paddle.norm(x, p='fro')
out_fro = paddle.norm(x, p='fro', axis=0)
out_fro = paddle.norm(x, p='fro', axis=[0, 1])
# compute 2-order norm along [0,1] dimension.
out_pnorm = paddle.norm(x, p=2, axis=[0, 1])
out_pnorm = paddle.norm(x, p=2)
#out_pnorm = [17.43559577 16.91153453 16.73320053 16.91153453]
# compute inf-order norm
out_pnorm = paddle.norm(x, p=np.inf)
#out_pnorm = [12.]
out_pnorm = paddle.norm(x, p=np.inf, axis=0)
#out_pnorm = [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]]
# compute -inf-order norm
out_pnorm = paddle.norm(x, p=-np.inf)
#out_pnorm = [0.]
out_pnorm = paddle.norm(x, p=-np.inf, axis=0)
# out_fro = [17.43559577 16.91153453 16.73320053 16.91153453]
paddle.enable_static()
class API_NormTest(unittest.TestCase):
def test_basic(self):
keep_dims = {False, True}
for keep in keep_dims:
run_fro(self,
p='fro',
axis=None,
shape_x=[2, 3, 4],
dtype="float32",
keep_dim=keep)
run_fro(self,
p='fro',
axis=[0, 1],
shape_x=[2, 3, 4],
dtype="float64",
keep_dim=keep,
check_dim=True)
run_pnorm(self,
p=2,
axis=None,
shape_x=[3, 4],
dtype="float32",
keep_dim=keep)
run_pnorm(self,
p=2,
axis=1,
shape_x=[3, 4],
dtype="float64",
keep_dim=keep,
check_dim=True)
run_pnorm(self,
p=np.inf,
axis=0,
shape_x=[2, 3, 4],
dtype="float32",
keep_dim=keep,
check_dim=True)
run_pnorm(self,
p=np.inf,
axis=None,
shape_x=[2, 3, 4],
dtype="float32",
keep_dim=keep)
run_pnorm(self,
p=-np.inf,
axis=0,
shape_x=[2, 3, 4],
dtype="float64",
keep_dim=keep,
check_dim=True)
run_pnorm(self,
p=-np.inf,
axis=None,
shape_x=[2, 3, 4],
dtype="float64",
keep_dim=keep)
run_pnorm(self,
p=0,
axis=1,
shape_x=[3, 4],
dtype="float64",
keep_dim=keep,
check_dim=True)
run_pnorm(self,
p=1,
axis=1,
shape_x=[3, 4],
dtype="float64",
keep_dim=keep,
check_dim=True)
run_pnorm(self,
p=0,
axis=None,
shape_x=[3, 4],
dtype="float64",
keep_dim=keep,
check_dim=True)
run_pnorm(self,
p=2,
axis=[0, 1],
shape_x=[2, 3, 4],
dtype="float64",
keep_dim=keep,
check_dim=True)
run_pnorm(self,
p=2,
axis=-1,
shape_x=[2, 3, 4],
dtype="float64",
keep_dim=keep,
check_dim=True)
run_pnorm(self,
p=1,
axis=[0, 1],
shape_x=[2, 3, 4],
dtype="float64",
keep_dim=keep,
check_dim=True)
run_pnorm(self,
p=np.inf,
axis=[0, 1],
shape_x=[2, 3, 4],
dtype="float64",
keep_dim=keep,
check_dim=True)
run_pnorm(self,
p=-np.inf,
axis=[0, 1],
shape_x=[2, 3, 4],
dtype="float64",
keep_dim=keep,
check_dim=True)
def test_dygraph(self):
run_graph(self, p='fro', axis=None, shape_x=[2, 3, 4], dtype="float32")
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[10, 10], dtype="float32")
y_1 = paddle.norm(x, p='fro', name='frobenius_name')
y_2 = paddle.norm(x, p=2, name='pnorm_name')
self.assertEqual(('frobenius_name' in y_1.name), True)
self.assertEqual(('pnorm_name' in y_2.name), True)
def test_errors(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
def err_dtype(p, shape_x, xdtype, out=None):
data = fluid.data(shape=shape_x, dtype=xdtype)
paddle.norm(data, p=p, out=out)
self.assertRaises(TypeError, err_dtype, "fro", [2, 2], "int64")
self.assertRaises(ValueError, paddle.norm, "inf", [2], "int64")
out = fluid.data(name="out", shape=[1], dtype="int64")
self.assertRaises(TypeError, err_dtype, "fro", [2, 2], "float64",
out)
self.assertRaises(TypeError, err_dtype, 2, [10], "int64")
self.assertRaises(TypeError, err_dtype, 2, [10], "float64", out)
data = fluid.data(name="data_2d", shape=[2, 2], dtype="float64")
self.assertRaises(ValueError, paddle.norm, data, p="unsupport norm")
self.assertRaises(ValueError, paddle.norm, data, p=[1])
self.assertRaises(ValueError, paddle.norm, data, p=[1], axis=-1)
self.assertRaises(ValueError, paddle.norm, 0, [1, 0], "float64")
data = fluid.data(name="data_3d", shape=[2, 2, 2], dtype="float64")
self.assertRaises(ValueError,
paddle.norm,
data,
p='unspport',
axis=[-3, -2, -1])
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
| [
"[email protected]"
]
| |
2d590385ddba87310af206021f891c91aea028ef | df7b40e95718ac0f6071a0ba571b42efc81cf6de | /configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py | de4a8a5e9f030f1e8a8802596885186163f23eed | [
"Apache-2.0"
]
| permissive | shinianzhihou/ChangeDetection | 87fa2c498248e6124aeefb8f0ee8154bda36deee | 354e71234bef38b6e142b6ba02f23db958582844 | refs/heads/master | 2023-01-23T20:42:31.017006 | 2023-01-09T11:37:24 | 2023-01-09T11:37:24 | 218,001,748 | 162 | 29 | Apache-2.0 | 2022-11-03T04:11:00 | 2019-10-28T08:41:54 | Python | UTF-8 | Python | false | false | 368 | py | _base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py'
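# Overrides on top of the R-50 base config: ResNet-101 backbone with output stride 16 (d16), multi-grid dilations (1, 2, 4), ASPP dilations matched to OS16, and OHEM pixel sampling in the decode head.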
model = dict(
pretrained='open-mmlab://resnet101_v1c',
backbone=dict(
depth=101,
dilations=(1, 1, 1, 2),
strides=(1, 2, 2, 1),
multi_grid=(1, 2, 4)),
decode_head=dict(
dilations=(1, 6, 12, 18),
sampler=dict(type='OHEMPixelSampler', min_kept=100000)))
| [
"[email protected]"
]
|