| column | dtype | range / classes |
|---|---|---|
| blob_id | stringlengths | 40 to 40 |
| directory_id | stringlengths | 40 to 40 |
| path | stringlengths | 3 to 616 |
| content_id | stringlengths | 40 to 40 |
| detected_licenses | sequencelengths | 0 to 112 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5 to 115 |
| snapshot_id | stringlengths | 40 to 40 |
| revision_id | stringlengths | 40 to 40 |
| branch_name | stringclasses | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id (⌀) | int64 | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | stringclasses | 22 values |
| gha_event_created_at (⌀) | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at (⌀) | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | stringclasses | 149 values |
| src_encoding | stringclasses | 26 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | stringclasses | 188 values |
| content | stringlengths | 3 to 10.2M |
| authors | sequencelengths | 1 to 1 |
| author_id | stringlengths | 1 to 132 |

⌀ marks a nullable column.
00935ac28db148eb7bb1523af3c2be6cecafadc1 | fcdfe976c9ed60b18def889692a17dc18a8dd6d7 | /python/torch/list_cuda.py | c612de25cf8031c6597471eb13d2694a2b9c0425 | [] | no_license | akihikoy/ay_test | 4907470889c9bda11cdc84e8231ef3156fda8bd7 | a24dfb720960bfedb94be3b4d147e37616e7f39a | refs/heads/master | 2023-09-02T19:24:47.832392 | 2023-08-27T06:45:20 | 2023-08-27T06:45:20 | 181,903,332 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | #!/usr/bin/python3
#\file list_cuda.py
#\brief List available CUDA devices.
#\author Akihiko Yamaguchi, [email protected]
#\version 0.1
#\date Oct.01, 2021
import torch
if __name__=='__main__':
print('Number of CUDA devices:', torch.cuda.device_count())
for i in range(torch.cuda.device_count()):
print(' cuda:{}: {}'.format(i,torch.cuda.get_device_name('cuda:{}'.format(i))))
| [
"[email protected]"
] | |
7c2c9ea0efe833ed58a22a41c2794aab4ea6fc29 | c5983e05a4c04cb8cbb48e97e279a69787c6ffb6 | /backend/manage.py | 7d1ab071b19068b2c62e9b54d07531f62df29a26 | [] | no_license | crowdbotics-apps/gffxgx-19180 | 2377637af6b1dc912b60506eb99f5dd921ac66f6 | 60a37a5a947ae29dbfcd8c548c31b2e22af4ce13 | refs/heads/master | 2022-11-27T04:03:00.944208 | 2020-07-25T23:22:09 | 2020-07-25T23:22:09 | 282,541,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gffxgx_19180.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
16dda99ae80461e4ee1c71b3658657c88f169871 | 65d891b90e59161c2fadff22dcc29f6ec7a789f4 | /presidenciaveis/presidenciaveis/models/__init__.py | 13a7e4ed1ad001ab4b15a52c0aa4ca35b94a8676 | [] | no_license | nizbel/propostas-candidatos-bbc | 32d3184d5f10bc50e2d79c686da2ea3daddbe93b | 82e60d978e6a01925fbfb49d370f7d088f588585 | refs/heads/master | 2023-05-26T03:22:27.050795 | 2020-06-29T00:26:40 | 2020-06-29T00:26:40 | 275,682,883 | 0 | 0 | null | 2021-06-10T23:06:30 | 2020-06-28T23:00:38 | Python | UTF-8 | Python | false | false | 24 | py | from candidatos import * | [
"[email protected]"
] | |
5ad34a82314714deb74abfef98c2187642cbb641 | ac1fdf53359b53e183fb9b2602328595b07cf427 | /ParlAI/parlai/mturk/tasks/turn_annotations/constants.py | 643cf8e3310559f96493d3bed502bd3c31b66074 | [] | no_license | Ufukdogann/MasterThesis | 780410c5df85b789136b525bce86ba0831409233 | b09ede1e3c88c4ac3047800f5187c671eeda18be | refs/heads/main | 2023-01-24T18:09:52.285718 | 2020-11-27T16:14:29 | 2020-11-27T16:14:29 | 312,416,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f604cb6189404aba2ec507ce5de08423d480fef202b1e438be8dfa3fbc7537bb
size 3113
| [
"134679852Ufuk*"
] | 134679852Ufuk* |
eeacb3481567c6c734549b300e411fe3b2b860d9 | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/python/eager/remote_test.py | edd00bc1f3f14fe6cc0d3a71becd864c3a8016f0 | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 8,185 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for remote execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import server_lib
class SingleWorkerTest(test.TestCase):
def setUp(self):
super(SingleWorkerTest, self).setUp()
workers, _ = test_util.create_local_cluster(1, 0)
remote.connect_to_remote_host(workers[0].target)
def testMultiDeviceFunctionBasic(self):
@def_function.function
def basic(i):
with ops.device('/job:localhost/replica:0/task:0/cpu:0'):
a = constant_op.constant([2]) + i
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
b = constant_op.constant([1])
return a + b
self.assertAllEqual(basic(constant_op.constant([2])).numpy(), [5])
self.assertAllEqual(basic(constant_op.constant([1])).numpy(), [4])
def testMultiDeviceFunctionVariable(self):
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
variable_b = variables.Variable(1)
@def_function.function
def with_variable(i):
return i + variable_b
self.assertAllEqual(with_variable(constant_op.constant([2])).numpy(), [3])
def testMultiDeviceFunctionRemoteOutput(self):
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
variable_b = variables.Variable(1)
@def_function.function
def remote_output(i):
return variable_b, i + variable_b
with self.assertRaises(errors.UnimplementedError) as cm:
remote_output(constant_op.constant([1]))
self.assertIn(
'Currently, outputting tensors on remote devices is not supported.',
cm.exception.message)
def testMultiDeviceFunctionAmbiguousDevice(self):
@def_function.function
def ambiguous_device(i):
with ops.device('cpu:0'):
return i + constant_op.constant([2])
with self.assertRaises(errors.InvalidArgumentError) as cm:
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
self.assertAllEqual(
ambiguous_device(constant_op.constant([2])).numpy(), [3])
self.assertIn('the output node must match exactly one device',
cm.exception.message)
class MultiWorkersTest(test.TestCase):
def setUp(self):
super(MultiWorkersTest, self).setUp()
workers, _ = test_util.create_local_cluster(3, 0)
remote.connect_to_remote_host(
[workers[0].target, workers[1].target, workers[2].target])
def testMultiDeviceFunctionOnLocalDevice(self):
with ops.device('/job:worker/replica:0/task:1'):
variable_b = variables.Variable(1.0)
@def_function.function
def remote_function(i):
with ops.device('/job:worker/replica:0/task:0'):
a = i + variable_b
c = a + 1.0
return c
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
def testMultiDeviceFunctionOnRemoteDevice(self):
with ops.device('/job:worker/replica:0/task:1'):
variable_b = variables.Variable(1.0)
@def_function.function
def remote_function(i):
with ops.device('/job:worker/replica:0/task:0'):
a = i + variable_b
c = a + 1.0
return c
context.context().mirroring_policy = context.MIRRORING_NONE
with ops.device('/job:worker/replica:0/task:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
if test_util.is_gpu_available():
with ops.device('/job:worker/replica:0/task:0/device:GPU:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
context.context().mirroring_policy = context.MIRRORING_ALL
with ops.device('/job:worker/replica:0/task:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
if test_util.is_gpu_available():
with ops.device('/job:worker/replica:0/task:0/device:GPU:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
def testMultiDeviceWhileLoopOnRemoteDevice(self):
with ops.device('/job:worker/replica:0/task:1'):
variable_b = variables.Variable(1.0)
@def_function.function
def remote_function(i):
def body(i, _):
with ops.device('/job:worker/replica:0/task:0'):
a = i + variable_b
return a + 1.0, 1
return control_flow_ops.while_loop_v2(lambda _, d: d < 1, body, [i, 0])[0]
context.context().mirroring_policy = context.MIRRORING_NONE
with ops.device('/job:worker/replica:0/task:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
if test_util.is_gpu_available():
with ops.device('/job:worker/replica:0/task:0/device:GPU:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
context.context().mirroring_policy = context.MIRRORING_ALL
with ops.device('/job:worker/replica:0/task:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
if test_util.is_gpu_available():
with ops.device('/job:worker/replica:0/task:0/device:GPU:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
def testSimpleParameterServer(self):
with ops.device('/job:worker/task:2/device:CPU:0'):
v1 = variables.Variable(initial_value=0)
v2 = variables.Variable(initial_value=10)
@def_function.function
def worker_fn():
v1.assign_add(1)
v2.assign_sub(2)
return v1.read_value() + v2.read_value()
with ops.device('/job:worker/task:0/device:CPU:0'):
self.assertAllEqual(worker_fn(), 9)
with ops.device('/job:worker/task:1/device:CPU:0'):
self.assertAllEqual(worker_fn(), 8)
_GRPC_PREFIX = 'grpc://'
class MultiJobsTest(test.TestCase):
def setUp(self):
super(MultiJobsTest, self).setUp()
workers, ps = test_util.create_local_cluster(2, 1)
cluster = {
'my_worker': [
_strip_prefix(workers[0].target, _GRPC_PREFIX),
_strip_prefix(workers[1].target, _GRPC_PREFIX),
],
'my_ps': [_strip_prefix(ps[0].target, _GRPC_PREFIX)],
}
remote.connect_to_cluster(server_lib.ClusterSpec(cluster))
def testSimpleParameterServer(self):
with ops.device('/job:my_ps/task:0/device:CPU:0'):
v1 = variables.Variable(initial_value=0)
v2 = variables.Variable(initial_value=10)
@def_function.function
def worker_fn():
v1.assign_add(1)
v2.assign_sub(2)
return v1.read_value() + v2.read_value()
with ops.device('/job:my_worker/task:0/device:CPU:0'):
self.assertAllEqual(worker_fn(), 9)
with ops.device('/job:my_worker/task:1/device:CPU:0'):
self.assertAllEqual(worker_fn(), 8)
def _strip_prefix(s, prefix):
return s[len(prefix):] if s.startswith(prefix) else s
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
44719b0024dc4b15ac8a0ec83ddb63cfba8e3093 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/d278160c1fd0e8c3b24ee96c4fd91dddbbfab668-<_print_figure_tex>-bug.py | 60b91491bf1a7503ebe33d299abe5c10dfd4340c | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,481 | py | def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor, orientation, isLandscape, papertype, metadata=None, **kwargs):
"\n If text.usetex is True in rc, a temporary pair of tex/eps files\n are created to allow tex to manage the text layout via the PSFrags\n package. These files are processed to yield the final ps or eps file.\n\n metadata must be a dictionary. Currently, only the value for\n the key 'Creator' is used.\n "
isEPSF = (format == 'eps')
if is_string_like(outfile):
title = outfile
elif is_writable_file_like(outfile):
title = None
else:
raise ValueError('outfile must be a path or a file-like object')
self.figure.dpi = 72
(width, height) = self.figure.get_size_inches()
xo = 0
yo = 0
(l, b, w, h) = self.figure.bbox.bounds
llx = xo
lly = yo
urx = (llx + w)
ury = (lly + h)
bbox = (llx, lly, urx, ury)
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get('dryrun', False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
self._pswriter = io.StringIO()
_bbox_inches_restore = kwargs.pop('bbox_inches_restore', None)
ps_renderer = self._renderer_class(width, height, self._pswriter, imagedpi=dpi)
renderer = MixedModeRenderer(self.figure, width, height, dpi, ps_renderer, bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun:
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
if ((metadata is not None) and ('Creator' in metadata)):
creator_str = metadata['Creator']
else:
creator_str = (('matplotlib version ' + __version__) + ', http://matplotlib.org/')
(fd, tmpfile) = mkstemp()
with io.open(fd, 'w', encoding='latin-1') as fh:
print('%!PS-Adobe-3.0 EPSF-3.0', file=fh)
if title:
print(('%%Title: ' + title), file=fh)
print(('%%Creator: ' + creator_str), file=fh)
source_date_epoch = os.getenv('SOURCE_DATE_EPOCH')
if source_date_epoch:
source_date = datetime.datetime.utcfromtimestamp(int(source_date_epoch)).strftime('%a %b %d %H:%M:%S %Y')
else:
source_date = time.ctime()
print(('%%CreationDate: ' + source_date), file=fh)
print(('%%%%BoundingBox: %d %d %d %d' % bbox), file=fh)
print('%%EndComments', file=fh)
Ndict = len(psDefs)
print('%%BeginProlog', file=fh)
print(('/mpldict %d dict def' % Ndict), file=fh)
print('mpldict begin', file=fh)
for d in psDefs:
d = d.strip()
for l in d.split('\n'):
print(l.strip(), file=fh)
print('end', file=fh)
print('%%EndProlog', file=fh)
print('mpldict begin', file=fh)
print(('%s translate' % _nums_to_str(xo, yo)), file=fh)
print(('%s clipbox' % _nums_to_str((width * 72), (height * 72), 0, 0)), file=fh)
print(self._pswriter.getvalue(), file=fh)
print('end', file=fh)
print('showpage', file=fh)
fh.flush()
if isLandscape:
isLandscape = True
(width, height) = (height, width)
bbox = (lly, llx, ury, urx)
if isEPSF:
(paperWidth, paperHeight) = self.figure.get_size_inches()
if isLandscape:
(paperWidth, paperHeight) = (paperHeight, paperWidth)
else:
temp_papertype = _get_papertype(width, height)
if (papertype == 'auto'):
papertype = temp_papertype
(paperWidth, paperHeight) = papersize[temp_papertype]
else:
(paperWidth, paperHeight) = papersize[papertype]
if (((width > paperWidth) or (height > paperHeight)) and isEPSF):
(paperWidth, paperHeight) = papersize[temp_papertype]
verbose.report(('Your figure is too big to fit on %s paper. %s paper will be used to prevent clipping.' % (papertype, temp_papertype)), 'helpful')
texmanager = ps_renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
psfrag_rotated = convert_psfrags(tmpfile, ps_renderer.psfrag, font_preamble, custom_preamble, paperWidth, paperHeight, orientation)
if (rcParams['ps.usedistiller'] == 'ghostscript'):
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox, rotated=psfrag_rotated)
elif (rcParams['ps.usedistiller'] == 'xpdf'):
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox, rotated=psfrag_rotated)
elif rcParams['text.usetex']:
if False:
pass
else:
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox, rotated=psfrag_rotated)
if is_writable_file_like(outfile):
if file_requires_unicode(outfile):
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read().decode('latin-1'))
else:
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read())
else:
with io.open(outfile, 'wb') as fh:
pass
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode) | [
"[email protected]"
] | |
6e4c7c1f9873362de85924eedfa8590b9c5b2ebb | d7ee76b7f1d6cd038982335792f15959a58a8395 | /SWEA/3234. 준환이의 양팔저울.py | 4a705c6bef72acc0c16a6712a19c0da4eed31725 | [] | no_license | min1378/-algorithm | 1c5dea6b2f03e4d376275cfccbf11b240bc659d9 | bfb720277160077a816deec21469a7e597c62d14 | refs/heads/master | 2021-08-02T06:54:10.478501 | 2021-07-31T14:03:01 | 2021-07-31T14:03:01 | 202,688,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | def make_per(k):
if k == N:
scale(0, 0, 0)
else:
for i in range(N):
if visited[i]:
continue
visited[i] = True
per[k] = mass[i]
make_per(k + 1)
visited[i] = False
def scale(k, left, right):
global cnt
if k == N:
cnt += 1
return
else:
a = per[k]
k += 1
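        # Pruning: `half` is ceil(total/2), so once the left pan holds at
        # least half the total mass the right pan can never catch up, and
        # every one of the 2**(N - k) remaining placements is valid.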
if left + a >= half and k < N:
remain = (N - k)
cnt += (2 ** remain)
else:
scale(k, left + a, right)
if left >= right + a:
if left >= half and k < N:
remain = (N - k)
cnt += (2 ** remain)
return
scale(k, left, right + a)
T = int(input())
for tc in range(1, T + 1):
N = int(input())
mass = list(map(int, input().split()))
mass_sum = sum(mass)
if mass_sum % 2 == 0:
half = mass_sum // 2
else:
half = mass_sum // 2 + 1
left = 0
right = 0
cnt = 0
k = 0
visited = [False] * N
per = [0] * N
make_per(k)
print("#%d %d" % (tc, cnt)) | [
"[email protected]"
] | |
7a29db382b00fe53bf9504d4daf3d1a6dc420316 | d85043257d93d35ac5d20fdb784656a83e141350 | /pvd_309/plot.py | 1c6012af46ef3494245ccbf00ece9e6a22ff1ed3 | [] | no_license | CINF/cinfdata_test | ca7ae74c93afb2860c2fa24d6589e25ed5c7d38a | 159c5e7f4727318a6b7b78dcce8f0ea57353abdb | refs/heads/master | 2021-01-01T04:26:36.569960 | 2016-05-13T11:10:43 | 2016-05-13T11:10:43 | 58,448,151 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | ../sym-files2/plot.py | [
"[email protected]"
] | |
e2e9258cf1c302d8bd7e00834cca4cb299126b6b | a95cf706c3111069c75055d558a710dfe8538195 | /collective/dexteritytextindexer/tests/test_utils.py | ee435d7e486a1d2b1037d5c9026fa6f0d0928a0f | [] | no_license | gusunavarro/collective.dexteritytextindexer | 2854409eff0f843be8ed92febbbb6698e452c4d4 | 34394c1c9b2016a14985ae3314d45b3a695790eb | refs/heads/master | 2021-01-16T21:28:00.404619 | 2012-03-16T11:49:09 | 2012-03-16T11:49:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | from collective.dexteritytextindexer.directives import SEARCHABLE_KEY
from collective.dexteritytextindexer.utils import searchable
from plone.directives import form
from plone.supermodel.utils import mergedTaggedValueList
from unittest2 import TestCase
from zope import schema
class IExample(form.Schema):
foo = schema.TextLine(title=u'foo')
class TestUtils(TestCase):
"""Test utils module.
"""
def test_marking_field_as_searchable(self):
self.assertEquals([], mergedTaggedValueList(IExample, SEARCHABLE_KEY))
searchable(IExample, u'foo')
self.assertEquals([(IExample, 'foo', 'true')],
mergedTaggedValueList(IExample, SEARCHABLE_KEY))
| [
"[email protected]"
] | |
04ec7b26a157c4907b015ca5d1c3f74ae18fd6f0 | 4ad0cfa350552458df8a0270038ed436bd1d06f4 | /interface/login.py | c25249f7829acc58a9d598047ec4c22ca03e182d | [] | no_license | fzk466569/python_tkinter | 4b2e505f91bc4f73d632bb4fe029bd3a3b07c590 | 8c63ac171d171cd13c7891426841279f2ef53262 | refs/heads/master | 2021-01-21T11:26:38.127214 | 2017-08-31T13:15:27 | 2017-08-31T13:15:27 | 102,001,271 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,364 | py | from tkinter import *
import tkinter.messagebox
from repository.user import login_check
from interface.main_form import MainForm
class Login(object):
def __init__(self):
self.login = Tk()
        self.login.title('Webshell-based Campus Network Security System')
self.login.iconbitmap('../images/title.ico')
        input = LabelFrame(self.login, text='Enter your personal account', padx=5, pady=5)
input.pack(padx=10, pady=10)
        Label(input, text='Account:').grid(row=0, column=0, sticky=W, padx=5, pady=10)
        Label(input, text='Password:').grid(row=1, column=0, sticky=W, padx=5, pady=10)
self.username = Entry(input)
self.username.grid(row=0, column=1, padx=5, pady=10)
self.password = Entry(input, show='*')
self.password.grid(row=1, column=1, padx=5, pady=10)
        commit = Button(input, text='Submit', width=10,
command=self.confirm)
commit.grid(row=2, columnspan=3, pady=5)
mainloop()
def confirm(self):
name = self.username.get()
passwd = self.password.get()
if login_check(name, passwd):
self.login.destroy()
# self.login.withdraw()
MainForm()
else:
            tkinter.messagebox._show(title='ERROR!', message='Incorrect account or password')
if __name__ == '__main__':
Login()
| [
"fzk466569"
] | fzk466569 |
298c771a14c37a067196a174028e535fb052e119 | 47d1beba77ebde115c5d41b25a15ef144068c930 | /news/forms.py | 886b4b2d2460532d39973216f0431677c60234a9 | [] | no_license | uchicago-library/library_website | f32d7dcaf793b4646cac37ba7270715dccf84820 | e5912a17ed2de3a61ede2fbebda4a258664ff696 | refs/heads/master | 2023-08-16T20:20:45.063253 | 2023-08-10T21:19:12 | 2023-08-10T21:19:12 | 39,917,251 | 5 | 4 | null | 2023-08-10T21:19:14 | 2015-07-29T21:27:58 | Python | UTF-8 | Python | false | false | 405 | py | from django import forms
class EmailNotificationTestForm(forms.Form):
email_from = forms.EmailField(label='From email address:')
email_to = forms.EmailField(label='To email address:')
num_days = forms.IntegerField(label='Number of days of news stories to summarize:')
email_as_if_date = forms.DateField(label='Send the message as if it were the following date. (Use YYYY-MM-DD format.)')
| [
"[email protected]"
] | |
6cbb7370e69db54a2b4d182c802e8fcd78aa82c8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_cosmologies.py | e9122b4cb835c96b392e497a2cc11aa129f2ef0e | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py |
#calss header
class _COSMOLOGIES():
def __init__(self,):
self.name = "COSMOLOGIES"
self.definitions = cosmology
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['cosmology']
| [
"[email protected]"
] | |
1c71026b627e3c90fc1188ef5c29c34a20fee5de | 991cd70073c162f637fbec3a9e921707aa434b8e | /opentelemetry-resourcedetector-gcp/src/opentelemetry/resourcedetector/gcp_resource_detector/_gke.py | 85588604c1b20ff45c51d068a2f19032307303ed | [
"Apache-2.0"
] | permissive | GoogleCloudPlatform/opentelemetry-operations-python | 700a59c954cd18ae6428032339d01908580a4f2d | b0ca7decb6a5bb01409822e746b7463f4a7a76ba | refs/heads/main | 2023-08-18T11:24:59.282098 | 2023-08-15T17:02:54 | 2023-08-15T17:02:54 | 244,484,614 | 49 | 42 | Apache-2.0 | 2023-09-01T14:42:48 | 2020-03-02T22:00:22 | Python | UTF-8 | Python | false | false | 1,677 | py | # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass
from opentelemetry.resourcedetector.gcp_resource_detector import (
_gce,
_metadata,
)
# TODO: remove when Python 3.7 is dropped
from typing_extensions import Literal
KUBERNETES_SERVICE_HOST_ENV = "KUBERNETES_SERVICE_HOST"
def on_gke() -> bool:
return os.environ.get(KUBERNETES_SERVICE_HOST_ENV) is not None
def host_id() -> str:
return _gce.host_id()
def cluster_name() -> str:
return _metadata.get_metadata()["instance"]["attributes"]["cluster-name"]
@dataclass
class ZoneOrRegion:
type: Literal["zone", "region"]
value: str
def availability_zone_or_region() -> ZoneOrRegion:
cluster_location = _metadata.get_metadata()["instance"]["attributes"][
"cluster-location"
]
hyphen_count = cluster_location.count("-")
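    # GCP encodes granularity in the location string: a region id such as
    # "us-central1" has one hyphen, a zone id such as "us-central1-a" has two.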
if hyphen_count == 1:
return ZoneOrRegion(type="region", value=cluster_location)
if hyphen_count == 2:
return ZoneOrRegion(type="zone", value=cluster_location)
raise Exception(
f"unrecognized format for cluster location: {cluster_location}"
)
| [
"[email protected]"
] | |
064eff60e1077689e1a891e530570737476af76d | 749faa57b9adbe1ee762b0ad0e5f8fa1f71e1f20 | /python/tests/utils_test.py | a552d125000c72741270f610a5266754f5b25655 | [
"Apache-2.0"
] | permissive | bitcard/olm-mirror | 8c97257bdca8f9083a0234fd37e941883b298a9d | 769d013ef7b20757d2f83ab2e933f660e38de2a7 | refs/heads/master | 2022-01-05T22:09:17.326695 | 2019-04-30T22:25:21 | 2019-04-30T22:25:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | import base64
import hashlib
from future.utils import bytes_to_native_str
from hypothesis import given
from hypothesis.strategies import text
from olm import sha256
from olm._compat import to_bytes
class TestClass(object):
@given(text(), text())
def test_sha256(self, input1, input2):
first_hash = sha256(input1)
second_hash = sha256(input2)
hashlib_hash = base64.b64encode(
hashlib.sha256(to_bytes(input1)).digest()
)
hashlib_hash = bytes_to_native_str(hashlib_hash[:-1])
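        # The [:-1] above drops the single '=' pad that ends hashlib's 44-char
        # base64 encoding of a 32-byte digest; olm's sha256 output is unpadded.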
if input1 == input2:
assert first_hash == second_hash
else:
assert first_hash != second_hash
assert hashlib_hash == first_hash
| [
"[email protected]"
] | |
c6e39b32c297764a3fc74e78ce267124c7b3f7c6 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit1234.py | a61dcbaea83a919bfe4685829b0e23fb28706788 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,126 | py | # qubit number=5
# total number=49
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
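    # Conjugating a multi-controlled Z (mcu1 with lambda=pi) by X gates on
    # the 0-bits of each marked input flips the phase of exactly that basis
    # state, giving |x> -> (-1)^f(x) |x>.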
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
prog.h(input_qubit[0]) # number=43
prog.cz(input_qubit[4],input_qubit[0]) # number=44
prog.h(input_qubit[0]) # number=45
prog.z(input_qubit[4]) # number=33
prog.h(input_qubit[0]) # number=37
prog.cz(input_qubit[4],input_qubit[0]) # number=38
prog.h(input_qubit[0]) # number=39
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.rx(-1.0430087609918113,input_qubit[4]) # number=36
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=40
prog.x(input_qubit[0]) # number=41
prog.h(input_qubit[0]) # number=46
prog.cz(input_qubit[1],input_qubit[0]) # number=47
prog.h(input_qubit[0]) # number=48
prog.x(input_qubit[1]) # number=10
prog.rx(-0.06597344572538572,input_qubit[3]) # number=27
prog.cx(input_qubit[0],input_qubit[2]) # number=22
prog.x(input_qubit[2]) # number=23
prog.h(input_qubit[2]) # number=28
prog.cz(input_qubit[0],input_qubit[2]) # number=29
prog.h(input_qubit[2]) # number=30
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[4]) # number=35
prog.h(input_qubit[0]) # number=17
prog.rx(2.4912829742967055,input_qubit[2]) # number=26
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[2]) # number=25
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1234.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
3342dd166c1a4b52cda6b0d424dc3bfcc3d8b674 | 677002b757c0a1a00b450d9710a8ec6aeb9b9e9a | /tiago_public_ws/build/openslam_gmapping/catkin_generated/pkg.installspace.context.pc.py | 00673387ed81d830eb08b81f6a79356ed99d2359 | [] | no_license | mrrocketraccoon/tiago_development | ce686c86459dbfe8623aa54cf4279021342887fb | a0539bdcf21b67ab902a4649b516dcb929c54042 | refs/heads/main | 2023-06-16T19:39:33.391293 | 2021-07-08T21:20:03 | 2021-07-08T21:20:03 | 384,249,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lgridfastslam;-lscanmatcher;-lsensor_base;-lsensor_range;-lsensor_odometry;-lutils".split(';') if "-lgridfastslam;-lscanmatcher;-lsensor_base;-lsensor_range;-lsensor_odometry;-lutils" != "" else []
PROJECT_NAME = "openslam_gmapping"
PROJECT_SPACE_DIR = "/tiago_public_ws/install"
PROJECT_VERSION = "0.1.2"
| [
"[email protected]"
] | |
476f0c57827cb630f96d2cdcc3a9bbfa4bc31fe1 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano1572.py | 9508e1481ef4684c791d1baaeeb34cc17137599f | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,292 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/60000/CCBE71A8-587E-8343-83C1-092BC35EC378.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest1572.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"[email protected]"
] | |
5c9577919a2bb8b1da8842933a4f2f8656fe7c2c | c63fa9ad899c461aa6550a5404ffddb2d868a674 | /scripts/simulator_scripts/simple_international_simulator.py | cf96c290d24922ef5d7c76a84d31d714f0a0e017 | [] | no_license | cooperoelrichs/model_of_australia | 4f7dc065f8afad456c9700a33f11399731c91eff | 40eb9d8bf093aa7af0ae939108e1ed13fe08e6a2 | refs/heads/master | 2021-07-20T03:43:29.293008 | 2017-10-26T04:30:14 | 2017-10-26T04:30:14 | 103,367,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | n_years = 20
n_iter = 1e4
simple_internation_gdp_sim = SimpleInternationalGDPSimulator.run(
un_gdp_pc['Australia'].values,
shared_variance_international_gdp_model_parameters,
n_years, n_iter
)
subset = un_gdp_pc.columns.difference(['date'])
fig = plt.figure(figsize=(20, 10))
plt.plot(un_gdp_pc['date'], un_gdp_pc[subset]) # un_gdp_pc['Australia'])
date_range = pd.date_range(un_gdp_pc['date'].max() + pd.Timedelta(1, 'Y'), periods=n_years, freq='BAS')
plt.plot(date_range, simple_internation_gdp_sim.T)
plt.show()
print('Done.')
| [
"[email protected]"
] | |
137013bc6c827f23d04f9a51bc94c8f868126e7f | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nntunisian.py | 247a867961e9f4f0a39b6810bca6b24b970ef959 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 24 | py | ii = [('CoopJBT.py', 2)] | [
"[email protected]"
] | |
454ad9a3d78b7229f76b2ccc28782e5ec2a0a0a2 | ecf62aae48e02420cd99008f58c4725c6da56d22 | /models/city.py | 3cf1c50199c3fed43886a70749accf1730c301a9 | [] | no_license | ThibautBernard/AirBnB_clone | e3110415acd98b56134928eee0d2befb6bd68a25 | d495dd85add4332880eacf00b338704c2799d3e5 | refs/heads/main | 2023-03-08T15:51:46.968249 | 2021-03-03T15:58:29 | 2021-03-03T15:58:29 | 337,568,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | #!/usr/bin/python3
from models.base_model import BaseModel
"""
Class that represents a city.
"""
class City(BaseModel):
state_id = ""
name = ""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
| [
"[email protected]"
] | |
b5e0a9f369d3fccfbf3ceeae1aacce0df53aed4a | e3f64d087afb4f6dfd09940370d77e724a1886d7 | /ex12.py | 30b8c31a817dec36e526eb26c0ebe73fc45b2122 | [] | no_license | wangyi26/lpthw | 41e95d0414bb706b8c85d61737be982cd8c712f5 | 8e8705bf5f0eb070cacf7e82648d70c5637e6ec4 | refs/heads/master | 2020-03-26T19:27:17.014680 | 2018-08-19T02:04:16 | 2018-08-19T02:04:16 | 145,265,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | age = input("How old are you?")
height = input("How tall are you?")
weight = input("How much do you weigh?")
print(f"So,you're {age} old,{height} tall and {weight} heavy.") | [
"[email protected]"
] | |
107ffaf4a6924bb92f6a0915474629036599fe63 | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Basic Programming/Implementation/Basics of Implementation/Very Cool Numbers/solution.py | d0b76a17de7dac94d7100683084222e2f12c7eb2 | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 567 | py | from collections import Counter
from re import findall
t = int(input())
cases = []
x = 0
for _ in range(t):
r, k = map(int, input().strip().split())
cases.append((r, k))
x = max(x, r)
cools = []
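# The lookahead "(?=101)" counts overlapping occurrences of the bit pattern
# "101" in i's binary representation (e.g. "10101" contains two).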
for i in range(x + 1):
cools.append(len(findall("(?=101)", bin(i)[2:])))
cache = {}
for case in cases:
if case in cache:
print(cache[case])
continue
r, k = case
counter = Counter(cools[: r + 1])
occur = 0
for i, v in counter.items():
if i >= k:
occur += v
cache[(r, k)] = occur
print(occur)
| [
"[email protected]"
] | |
ff921394e8df92a8806a79e4023bf057ce4b5314 | b7125b27e564d2cc80a2ce8d0a6f934aa22c8445 | /.history/sudoku_20201101151526.py | 388887e85bc5a316b4aaff19358fd78c533e9129 | [] | no_license | JensVL96/Puzzle-solver-for-fun | 4c15dcd570c3705b7ac555efb56b52913e81083c | 6d8a4378a480372213a596a336a4deca727a00fc | refs/heads/master | 2021-07-15T05:19:42.185495 | 2020-11-08T13:59:49 | 2020-11-08T13:59:49 | 224,855,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,354 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
from config import *
from create_board import *
from solve_bloard import *
from display_board import *
from string import *
import pygame as pg
import numpy as np
# For error highlighting
def set_highlight(row, col, blk, lock):
global input_lock
input_lock = lock
global row_index
row_index = row
global col_index
    col_index = col
    global blk_index
    blk_index = blk
def get_cord(pos):
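    # Map a pixel position to 0-based board coordinates, measured from the
    # grid's top-left corner (TOP_LX, TOP_LY) in BLOCK_SIZE steps.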
global box_index_x
box_index_x = (pos[0] - TOP_LX)//BLOCK_SIZE
global box_index_y
box_index_y = (pos[1] - TOP_LY)//BLOCK_SIZE
def valid(grid, x, y, val, increase):
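    # Check whether `val` may be placed at (x, y): scan the row, the column
    # and the 3x3 block, recording the coordinates of any conflicting cell
    # so the UI can highlight it.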
input_lock = 0
row = col = blk = (0, 0)
for index in range(9):
# Check if value in column
if grid[x][index] == val:
col = (x, index)
input_lock = 1
# Check if value in row
if grid[index][y] == val:
row = (index, y)
input_lock = 1
# Finds the block
index_x = x // 3 # integer division
index_y = y // 3
# Check if value in block
for i in range(index_x * 3, index_x * 3 + 3):
for j in range (index_y * 3, index_y * 3 + 3):
if grid[i][j] == val:
blk = (i, j)
input_lock = 1
if input_lock == 1:
set_highlight(row, col, blk, input_lock)
return False
return True
class Main():
def __init__(self):
self.board = []
self.run()
def run(self):
pg.init()
self.screen = pg.display.set_mode(SCREEN_RES)
pg.display.set_caption('Sudoku solver')
display = Display_board(self.screen)
flag1 = 0
val = 0
pos = (0, 0)
input_lock = 0
get_cord((0, 0))
set_highlight((0, 0), (0, 0), (0, 0), input_lock)
board = create_board().board
while 1:
for event in pg.event.get():
if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
exit()
if event.type == pg.MOUSEBUTTONDOWN:
flag1 = 1
pos = pg.mouse.get_pos()
get_cord(pos)
if event.type == pg.KEYDOWN and input_lock != 1:
if event.key == pg.K_1:
val = 1
if event.key == pg.K_2:
val = 2
if event.key == pg.K_3:
val = 3
if event.key == pg.K_4:
val = 4
if event.key == pg.K_5:
val = 5
if event.key == pg.K_6:
val = 6
if event.key == pg.K_7:
val = 7
if event.key == pg.K_8:
val = 8
if event.key == pg.K_9:
val = 9
elif event.type == pg.KEYDOWN and input_lock == 1:
if event.key == pg.K_BACKSPACE:
val = 0
set_highlight((0, 0), (0, 0), (0, 0), 0)
if val != 0:
display.draw_val(val, box_index_x, box_index_y)
if valid(board, int(box_index_x), int(box_index_y), val, display):
board[int(box_index_x)][int(box_index_y)] = val
else:
board[int(box_index_x)][int(box_index_y)] = 0
val = 0
pg.draw.rect(self.screen, BLACK, (0, 0, self.screen.get_width(), self.screen.get_height()))
self.screen.fill(BEIGE)
display.draw(board)
cell = display.find_cell(box_index_x, box_index_y)
alpha = display.blink()
            rect = pg.Surface((int(cell[2] - cell[0]), int(cell[3] - cell[1])))
            rect.set_alpha(alpha)
            self.screen.blit(rect, (cell[0], cell[1]))
# print(box_index_x, box_index_y)
if input_lock == 1:
display.update(board, row_index, col_index, blk_index)
# display.draw_box()
pg.display.update()
self.solution = solve_board(board)
self.solution.assign_flags(board)
if __name__ == '__main__':
Main()
| [
"[email protected]"
] | |
71800abbf276d7b24cd982bdac42bd2ef4473a07 | b8fb00ee277478c368f5b7512bfd265f3ecea356 | /python/if_condition/venv/Scripts/pip3.7-script.py | 9ef9b0fdd160350cb17bebb111a37435f82b0d3d | [] | no_license | DharmilShahJBSPL/DharmilShah | 574477c38a8b76616618130f3b0679a23a9c1af8 | 0d197189c8dcf794d38145e8f1edba6766b02df9 | refs/heads/master | 2021-07-07T11:47:03.770219 | 2019-01-19T13:13:38 | 2019-01-19T13:13:38 | 152,415,037 | 0 | 1 | null | 2020-07-20T10:44:20 | 2018-10-10T11:48:36 | Python | UTF-8 | Python | false | false | 414 | py | #!E:\dharmil\python_task\if_condition\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
| [
"[email protected]"
] | |
b7ba8e7bdee898506b7d76ce65bc25678a96140d | fe22e8ffdb1b2f1e11becc027e71a7a512fe56eb | /util/merge.py | 83aafc267e2cfcc1d70ca06e26f9f32cd1dd0443 | [] | no_license | HEP-KBFI/stpol | 3cdb5dc125bb0394f4531abfdfe9629b0c8d0fa4 | 962837a3341dd26391025b9a07a9c1c93084bf64 | refs/heads/master | 2020-06-03T16:15:14.743807 | 2015-08-05T09:00:28 | 2015-08-05T09:00:28 | 5,716,481 | 0 | 1 | null | 2015-03-04T08:23:28 | 2012-09-07T12:27:30 | Python | UTF-8 | Python | false | false | 426 | py | from subprocess import check_call
from os import walk
from os.path import join
import sys
from glob import glob
if __name__=="__main__":
ind = sys.argv[1]
for root, dirs, items in walk(ind):
items_in_dirs = map(lambda x: glob(join(root, x, "*.root")), dirs)
tot = sum(map(lambda x: len(x), items_in_dirs))
if tot>0:
for d, i in zip(dirs, items_in_dirs):
print d, i
| [
"[email protected]"
] | |
4550cce6732f67469786d7760b44a1040f883e0d | 0e647273cffc1fb6cbd589fa3c7c277b221ba247 | /configs/hpt-pretrain/bdd_crop_blur/moco_v2_800ep_basetrain/5000-iters.py | 1294176225d78d0b2d97f068e432c64d14c478c4 | [
"Apache-2.0"
] | permissive | Berkeley-Data/OpenSelfSup | e9976bf011b69ebf918506ba184f464b1073ec13 | 221191b88d891de57725b149caf237ffef72e529 | refs/heads/master | 2023-05-12T07:34:52.268476 | 2021-04-08T00:58:37 | 2021-04-08T00:58:37 | 343,654,823 | 0 | 1 | Apache-2.0 | 2021-04-08T00:58:37 | 2021-03-02T05:20:27 | Python | UTF-8 | Python | false | false | 222 | py | _base_="../base-bdd_crop_blur-config.py"
# this will merge with the parent
model=dict(pretrained='data/basetrain_chkpts/moco_v2_800ep.pth')
# epoch related
total_iters=5000
checkpoint_config = dict(interval=total_iters)
| [
"[email protected]"
] | |
d12df0b6db88ebaebc427c001808a879ac88303e | f655a4f1575842190007736f0977c3cb68a70189 | /pycat | 454bcb45e1e76bea259a3ebd128c6ba331d1b5b6 | [] | no_license | bytesid19/PythonAdhoc | 4d869455b289db860a90f5379f7bfeeaa47c2bec | e358dd02def3d116e533051010f0711eadb7552f | refs/heads/master | 2022-05-03T16:30:27.533716 | 2019-02-02T15:50:53 | 2019-02-02T15:50:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | #!/usr/bin/python2
import os,sys
c_name=sys.argv[1:]
for i in c_name:
f=open(i,'r')
print f.read()
f.close()
| [
"[email protected]"
] | ||
9bbf7a1e9839497b6c4bf70667b9bdd845fc9d37 | 552bc626603a1757cf7836401cff5f0332a91504 | /flask/doit_JumpToFlask/chap03/03-8/회원가입_기능추가/views/question_views.py | 613d79cb4fa7e8e0f4c9bf1896a9b710c5702646 | [] | no_license | anifilm/webapp | 85f3d0aae34f46917b3c9fdf8087ec8da5303df1 | 7ef1a9a8c0dccc125a8c21b22db7db4b9d5c0cda | refs/heads/master | 2023-08-29T18:33:00.323248 | 2023-08-26T07:42:39 | 2023-08-26T07:42:39 | 186,593,754 | 1 | 0 | null | 2023-04-21T12:19:59 | 2019-05-14T09:49:56 | JavaScript | UTF-8 | Python | false | false | 1,448 | py | from datetime import datetime
from flask import Blueprint, render_template, request, url_for, g
from werkzeug.utils import redirect
from pybo import db
from pybo.models import Question
from pybo.forms import QuestionForm, AnswerForm
from pybo.views.auth_views import login_required
bp = Blueprint("question", __name__, url_prefix="/question")
@bp.route("/list/")
def _list():
page = request.args.get("page", type=int, default=1) # 페이지
question_list = Question.query.order_by(Question.create_date.desc())
question_list = question_list.paginate(page, per_page=10)
return render_template("question/question_list.html", question_list=question_list)
@bp.route("/detail/<int:question_id>/")
def detail(question_id):
form = AnswerForm()
question = Question.query.get_or_404(question_id)
return render_template(
"question/question_detail.html", question=question, form=form
)
@bp.route("/create/", methods=("GET", "POST"))
@login_required
def create():
form = QuestionForm()
if request.method == "POST" and form.validate_on_submit():
question = Question(
subject=form.subject.data,
content=form.content.data,
create_date=datetime.now(),
user=g.user,
)
db.session.add(question)
db.session.commit()
return redirect(url_for("main.index"))
return render_template("question/question_form.html", form=form)
| [
"[email protected]"
] | |
cabf99b46a69479e6f2d4ebc046f5591e50b087b | 09c39de5aad7b283cfac2f09a2b93e43086846d2 | /Unit 10 Advanced Topics in Python/01 Advanced Topics in Python/Review/15-Iterating Over Dictionaries.py | f3e71da8c2d4077bb403646af94b8975a0d49188 | [
"MIT"
] | permissive | lpython2006e/python-samples | b4e84080259faf75b41fb2fd4fb9d2fbc9f857aa | b94ba67ce0d7798ecf796dadae206aa75da58301 | refs/heads/master | 2023-01-21T13:16:13.295163 | 2020-11-29T11:01:50 | 2020-11-29T11:01:50 | 278,653,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | movies = {
"Monty Python and the Holy Grail": "Great",
"Monty Python's Life of Brian": "Good",
"Monty Python's Meaning of Life": "Okay"
}
print(movies.items())
| [
"[email protected]"
] | |
5855ed722d94538a3f665167b38f410e54779d80 | c51eef37bb983a9c35635c7ccc96a0cf689a7438 | /sites/ncbi/04_links.py | 8f13b4f8f330a1d06c2cd610f90644165411818d | [] | no_license | Kyeongrok/python_crawler | 0a717b43be36584af1b0f7c1ad0c79108a5d11e0 | 5a5da8af7bb080f752a9a066741ac8adab136a3a | refs/heads/master | 2022-09-13T03:15:08.053639 | 2022-08-02T15:45:03 | 2022-08-02T15:45:03 | 124,719,435 | 40 | 34 | null | 2019-02-27T08:29:52 | 2018-03-11T03:20:32 | HTML | UTF-8 | Python | false | false | 564 | py | from bs4 import BeautifulSoup
def getLinks(pageStr):
bs_obj = BeautifulSoup(pageStr, "html.parser")
rprts = bs_obj.findAll("div", {"class":"rprt"})
links = []
for item in rprts:
atag = item.find("a")
links.append(atag["href"].replace("/pubmed/", ""))
return links
links = []
for num in range(2, 194):
print(num)
file = open("./links_page/"+str(num) + ".html")
result = getLinks(file.read())
links = links + result
links.sort()
file = open("./links.txt", "w+")
for link in links:
file.write(link + "\n")
| [
"[email protected]"
] | |
cdbcfbbd7551dcd8dd76c054b1f050e4cc635f35 | 5e9bb224cc0e79670016c78f5262f530a343f71a | /run/get_rede_3_class_counts.py | d28773b0a2251ba3271deaba6ac6d518753d6a22 | [
"MIT"
] | permissive | olavosamp/semiauto-video-annotation | 6f86bb3b8b98bb1b910be1c95abf5474cd8526bb | b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd | refs/heads/master | 2020-06-06T02:48:16.677814 | 2020-03-10T05:22:49 | 2020-03-10T05:22:49 | 192,616,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | import numpy as np
import pandas as pd
from tqdm import tqdm
from glob import glob
from pathlib import Path
# from copy import copy
import libs.dirs as dirs
import libs.utils as utils
import libs.dataset_utils as dutils
import libs.commons as commons
''' Get class counts from rede 3 dataset csv file resulting from fuse_binary_datasets script '''
# rede = int(input("\nEnter desired net number.\n"))
rede = 3
classList = commons.rede3_classes
compiledPositivesPath = Path(dirs.iter_folder) / "dataset_rede_{}_positives_binary.csv".format(rede)
datasetDf = pd.read_csv(compiledPositivesPath)
datasetGroup = datasetDf.groupby('rede3')
print(datasetGroup.count()['FrameHash'])
countDf = pd.DataFrame(datasetGroup.count()['FrameHash'])
countDf['Counts'] = countDf['FrameHash']
total = countDf['Counts'].sum()
countDf['Percentage'] = countDf['Counts'].apply(lambda x: x/total)
print(countDf)
print(total)
countDf.drop("FrameHash", axis=1, inplace=True)
countDf.to_excel(compiledPositivesPath.with_name("semiauto_class_counts.xlsx")) | [
"[email protected]"
] | |
fbff749542a7d8d29c63a1b8284959aa9c8f310d | 865aeaf85b7cf0a27b04b5c563dee2b79443e6b7 | /docs/support/trace_support.py | f5044d28a57d4fbd35f16e3094092849c63f2f4c | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | pmacosta/peng | d8cc6e8757d62fb9968a42eaf11b5b87227a8b47 | ab05fac3c0a6c0f8c70ab3e456b5cc57f0484389 | refs/heads/master | 2021-01-21T14:32:42.863574 | 2019-06-11T14:30:38 | 2019-06-11T14:30:38 | 58,596,416 | 0 | 2 | MIT | 2019-03-08T15:49:44 | 2016-05-12T01:24:18 | Python | UTF-8 | Python | false | false | 3,331 | py | # trace_support.py
# Copyright (c) 2013-2019 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,C0411,E0401,E0611,W0212
# Standard library imports
from __future__ import print_function
import collections
import copy
import datetime
import os
import warnings
# PyPI imports
with warnings.catch_warnings():
from _pytest.warning_types import PytestWarning
warnings.filterwarnings("ignore", category=PytestWarning)
import pytest
import pmisc
import pexdoc.exdoc
###
# Functions
###
def trace_pars(mname):
"""Define trace parameters."""
pickle_fname = os.path.join(os.path.dirname(__file__), "{0}.pkl".format(mname))
ddir = os.path.dirname(os.path.dirname(__file__))
moddb_fname = os.path.join(ddir, "moddb.json")
in_callables_fname = moddb_fname if os.path.exists(moddb_fname) else None
out_callables_fname = os.path.join(ddir, "{0}.json".format(mname))
noption = os.environ.get("NOPTION", None)
exclude = ["_pytest", "execnet"]
partuple = collections.namedtuple(
"ParTuple",
[
"pickle_fname",
"in_callables_fname",
"out_callables_fname",
"noption",
"exclude",
],
)
return partuple(
pickle_fname, in_callables_fname, out_callables_fname, noption, exclude
)
def run_trace(
mname,
fname,
module_prefix,
callable_names,
no_print,
module_exclude=None,
callable_exclude=None,
debug=False,
):
"""Run module tracing."""
# pylint: disable=R0913
module_exclude = [] if module_exclude is None else module_exclude
callable_exclude = [] if callable_exclude is None else callable_exclude
par = trace_pars(mname)
start_time = datetime.datetime.now()
with pexdoc.exdoc.ExDocCxt(
exclude=par.exclude + module_exclude,
pickle_fname=par.pickle_fname,
in_callables_fname=par.in_callables_fname,
out_callables_fname=par.out_callables_fname,
_no_print=no_print,
) as exdoc_obj:
fname = os.path.realpath(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"tests",
"test_{0}.py".format(fname),
)
)
test_cmd = (
["--color=yes"]
+ (["-s", "-vv"] if debug else ["-q", "-q", "-q"])
+ ["--disable-warnings"]
+ ["-x"]
+ ([par.noption] if par.noption else [])
+ ["-m " + mname]
+ [fname]
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=PytestWarning)
if pytest.main(test_cmd):
raise RuntimeError("Tracing did not complete successfully")
stop_time = datetime.datetime.now()
if not no_print:
print(
"Auto-generation of exceptions documentation time: {0}".format(
pmisc.elapsed_time_string(start_time, stop_time)
)
)
for callable_name in callable_names:
callable_name = module_prefix + callable_name
print("\nCallable: {0}".format(callable_name))
print(exdoc_obj.get_sphinx_doc(callable_name, exclude=callable_exclude))
print("\n")
return copy.copy(exdoc_obj)
| [
"[email protected]"
] | |
3ef27a31c380abbfdb38ff95db3f8d2b9b8d285f | 36e12b65922ebbb6d95aff6cbac0777c47e24153 | /distinctregions_cGcC.py | 352b42f3b5be614d3c66d47944522d02075bcf1d | [
"MIT"
] | permissive | NailouZhang/AnalysisScripts | d0d00174f642d6722cc907f9a392084600630780 | 3df37d2f8fca9bc402afe5ea870c42200fca1ed3 | refs/heads/master | 2023-06-06T08:14:39.064920 | 2021-06-22T16:46:26 | 2021-06-22T16:46:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,677 | py | #Want to look at cGcC scores for "distinct" 3' UTR regions to get weighted cGcC scores based on the "distalness" of a sequence.
import re
import sys
import os
import gzip
from Bio import SeqIO
import numpy as np
import gffutils
#Need to get UTR regions that are distinct to each isoform
#Start with gff of 3' UTR regions
def getdistinctregions(gff, genomefasta):
distinctregions = {} #{geneid : {transcriptid(s) : [3UTR number, distinctUTRseq]}}
print 'Indexing gff...'
gff_fn = gff
db_fn = os.path.basename(gff_fn) + '.db'
if os.path.isfile(db_fn) == False:
gffutils.create_db(gff_fn, db_fn, merge_strategy = 'merge', verbose = True)
db = gffutils.FeatureDB(db_fn)
print 'Done indexing!'
print 'Indexing genome sequence...'
seq_dict = SeqIO.to_dict(SeqIO.parse(gzip.open(genomefasta), 'fasta'))
print 'Done indexing!'
genes = db.features_of_type('gene')
for gene in genes:
distinctseqs = {} #{transcriptid(s) : [pAsite counter (may be different than number of UTRs because not all UTRs are represented here, distinctUTRseq]}
seenseqs = []
utrcounter = 0
mostdownstreamcoord = 0 #The most downstream coordinate of any UTR we've seen so far for this gene.
geneid = str(gene.id).replace('gene:', '').split('.')[0]
if gene.strand == '+':
for UTR3 in db.children(gene, featuretype = 'UTR3', level = 1, order_by = 'end'):
distinctseq = ''
UTRid = str(UTR3.id).replace('UTR3:', '').split('.')[0]
#If this is the first UTR for this gene
if utrcounter == 0:
for exon in db.children(UTR3, featuretype = 'exon', level = 1, order_by = 'start'):
exonseq = seq_dict[exon.chrom].seq[exon.start-1:exon.end].upper()
distinctseq += exonseq
mostdownstreamcoord = UTR3.end
utrcounter +=1
distinctseqs[UTRid] = [utrcounter, str(distinctseq)]
elif utrcounter >= 1:
for exon in db.children(UTR3, featuretype = 'exon', level = 1, order_by = 'start'):
#If this exon is somehow contained within the last one (should not be possible), skip it
if exon.end <= mostdownstreamcoord:
pass
elif exon.end > mostdownstreamcoord:
if exon.start < mostdownstreamcoord:
exonseq = seq_dict[exon.chrom].seq[mostdownstreamcoord:exon.end].upper()
distinctseq += exonseq
elif exon.start >= mostdownstreamcoord:
exonseq = seq_dict[exon.chrom].seq[exon.start - 1:exon.end].upper()
distinctseq += exonseq
mostdownstreamcoord = UTR3.end
#Only going to call something a new polyA site if it's at least 50 nt away from the previous one
#As a proxy for this, it must have at least 50 nt of "distinct" sequence
if len(str(distinctseq)) >= 50:
utrcounter +=1
distinctseqs[UTRid] = [utrcounter, str(distinctseq)]
elif gene.strand == '-':
for UTR3 in db.children(gene, featuretype = 'UTR3', level = 1, order_by = 'start', reverse = True):
distinctseq = ''
UTRid = str(UTR3.id).replace('UTR3:', '').split('.')[0]
#If this is the first UTR for this gene
if utrcounter == 0:
for exon in db.children(UTR3, featuretype = 'exon', level = 1, order_by = 'end', reverse = True):
exonseq = seq_dict[exon.chrom].seq[exon.start-1:exon.end].reverse_complement().upper()
#Must prepend instead of append this time
distinctseq = distinctseq + exonseq
mostdownstreamcoord = UTR3.start
utrcounter +=1
distinctseqs[UTRid] = [utrcounter, str(distinctseq)]
elif utrcounter >= 1:
for exon in db.children(UTR3, featuretype = 'exon', level = 1, order_by = 'end', reverse = True):
#If this exon is somehow contained within the last one (should not be possible), skip it
if exon.start >= mostdownstreamcoord:
continue
elif exon.start < mostdownstreamcoord:
if exon.end > mostdownstreamcoord:
exonseq = seq_dict[exon.chrom].seq[exon.start-1:mostdownstreamcoord-1].reverse_complement().upper()
distinctseq = distinctseq + exonseq
elif exon.start <= mostdownstreamcoord:
exonseq = seq_dict[exon.chrom].seq[exon.start-1:exon.end].reverse_complement().upper()
distinctseq = distinctseq + exonseq
mostdownstreamcoord = UTR3.start
if len(str(distinctseq)) >= 50:
utrcounter +=1
distinctseqs[UTRid] = [utrcounter, str(distinctseq)]
distinctregions[geneid] = distinctseqs
return distinctregions
def getcGcC(seq):
#Do we want cGcC over the whole seq?
#Mean over 80 bp windows?
#Max score over all windows?
windowsize = 50
cGcCscores = []
for i in range(len(seq) - windowsize + 1):
window = seq[i:i+windowsize]
if window.count('G') == 0:
maxG = 0
else:
maxG = max(len(s) for s in re.findall(r'G+', window))
if window.count('C') == 0:
maxC = 0
else:
maxC = max(len(s) for s in re.findall(r'C+', window))
longestrun = max(maxG, maxC)
cGscore = 0
cCscore = 0
#First get the cG score
for i in range(1, longestrun + 1):
searchstring = 'G' * i
matches = re.findall(r'(?=({0}))'.format(searchstring), window)
score = len(matches) * i
cGscore += score
#Now the cC score
for i in range(1, longestrun + 1):
searchstring = 'C' * i
matches = re.findall(r'(?=({0}))'.format(searchstring), window)
score = len(matches) * i
cCscore += score
if cCscore == 0:
cGcCscore = cGscore
else:
cGcCscore = cGscore / float(cCscore)
cGcCscores.append(cGcCscore)
meanscore = np.mean(cGcCscores)
maxscore = max(cGcCscores)
return maxscore
#For every transcript with > 1 distinct region, calculate scores for each region. Then multiply that score by position factor.
#Take sum of weighted scores and divide by sum of all scores to produce a "PSI" value.
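# Hedged sketch of that weighting step (the original script stops short of it).
# Taking the pA-site number itself as the 'position factor', and PSI as
# sum(weighted scores)/sum(scores), are both assumptions:
def weightedcGcC(distinctseqs):
    #distinctseqs = {transcriptid : [pA site number, distinct UTR seq]}
    scored = [(number, getcGcC(seq)) for number, seq in distinctseqs.values()]
    totalscore = sum(score for number, score in scored)
    if totalscore == 0:
        return None
    return sum(number * score for number, score in scored) / float(totalscore)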
| [
"[email protected]"
] | |
356b615d823669751feac2502978c4b41465f695 | 17ec70a0387905f84f7fc1e3ee7f3428dd4e7874 | /Aoj/dpl/dpl_1_b.py | b1eb71bc3859fb849884a5eabde842e4d01b4db4 | [] | no_license | onikazu/ProgramingCompetitionPractice | da348e984b6bcb79f96f461d9df15a33730169b2 | 5a682943976bcac8646176feef9b70a6784abd8a | refs/heads/master | 2021-02-09T06:27:54.994621 | 2020-03-14T02:28:50 | 2020-03-14T02:28:50 | 244,252,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | n, limit = map(int, input().split())
items = [tuple(map(int, input().split())) for _ in range(n)]
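# 0/1 knapsack (AOJ DPL_1_B): dp[i][j] = best total value using the first i
# items within weight capacity j; the answer is dp[n][limit].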
dp = [[0] * (limit + 1) for _ in range(n + 1)]
for i in range(1, n+1):
v, w = items[i-1]
for j in range(limit+1):
if j < w:
dp[i][j] = dp[i-1][j]
else:
dp[i][j] = max(dp[i-1][j], dp[i-1][j-w]+v)
print(dp[-1][-1])
| [
"[email protected]"
] | |
40bf41cc14776bfc93ee66abe4f2e8c67807d90d | 3090b3e964601e0392a03c903d28f324b4351936 | /src/demo/urls.py | fd264174fd4d5bde963b1d4b28312df4909f73b0 | [] | no_license | infoxchange/django-verification | eeebb4f7372ed95d43d8afd6f7b20ebdaa0e295e | 51ac7a648863393d44fe7a2813eccbfbee2eb615 | refs/heads/master | 2021-01-24T23:42:01.913727 | 2014-07-29T08:29:12 | 2014-07-29T08:29:12 | 24,980,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | from __future__ import unicode_literals
from django.contrib import admin
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
admin.autodiscover()
urlpatterns = staticfiles_urlpatterns()
urlpatterns += patterns("",
url(r'^$', TemplateView.as_view(template_name='index.html')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('demo.projectapp.urls')),
url(r'^verify/', include('verification.urls')),
url(r'^accounts/login/$', 'django.contrib.auth.views.login', name='login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', name='logout'),
)
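# Note: patterns() and dotted-string view references were deprecated in Django 1.8
# and removed in 1.10; kept here as the original project wrote them.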
| [
"[email protected]"
] | |
e9406c5e1be4ae6d6959da2fb43a7d0f3fdd50d9 | 779afab3a8fd338a8a5b82284ca1269090ff92ee | /3 Processing/2D/BubbleSort/BubbleSort.pyde | 77c8f6c2685f9e8c88d84b4121f49d1b271ba1ac | [] | no_license | vitroid/PythonTutorials | 341ea037d168f744bee42c26c1c9408763b4bb50 | 32dd7325ca7099954f5eb33927ec9e122bb54066 | refs/heads/master | 2023-08-03T05:15:48.254380 | 2023-08-01T08:29:52 | 2023-08-01T08:29:52 | 36,958,329 | 3 | 0 | null | 2016-05-06T01:13:23 | 2015-06-05T22:22:43 | Python | UTF-8 | Python | false | false | 835 | pyde | """
Visualizes the process of sorting (rearranging the data).
"""
def bubblesort(data):
for x in range(len(data)):
for y in range(x+1,len(data)):
if data[x] > data[y]:
data[x],data[y] = data[y],data[x]
yield data
pix = 8
import random
data = [random.random() for i in range(50)]
sorter = bubblesort(data)  # renamed from 'iter' to avoid shadowing the built-in
def setup():
size(pix*len(data),pix*len(data))
#frameRate(3)
def draw():
    d = next(sorter, None)
    if d is None:
noLoop()
else:
background(0)
fill(255)
noStroke()
textSize(24)
text("{0}".format(frameCount+1),10,20)
noFill()
drawone(d)
def drawone(d):
noFill()
stroke(255)
strokeWeight(2)
for i in range(len(d)):
line(0,pix*i,d[i]*width,pix*i)
| [
"[email protected]"
] | |
e6d8834ca974ac3aabfaa64cbd846a640643e735 | d57ebd908a78ad6b34f6b217f14d53fc894ff791 | /Algorithm/1836.py | e826069535f81c8de1cf17c21aeef474811d8727 | [] | no_license | ruanhq/Leetcode | df732730b63988b808a382ad17bddc49e00bc3a5 | fcb4ccbc9e84e1d2bd4541efc040bd957a96a3a2 | refs/heads/master | 2023-07-05T04:01:43.097980 | 2021-08-08T06:12:14 | 2021-08-08T06:12:14 | 320,983,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,885 | py | #1836. Remove duplicates from an unsorted linked list:
import collections
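# ListNode is supplied by the LeetCode judge; a minimal stand-in for running
# this file locally would be:
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val, self.next = val, next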
class Solution:
def deleteDuplicatesUnsorted(self, head: ListNode) -> ListNode:
currentHead = head
freqMap = {}
#Construct a frequency map of the element in the linked list by scanning through.
while currentHead:
if currentHead.val in freqMap:
freqMap[currentHead.val] += 1
else:
freqMap[currentHead.val] = 1
currentHead = currentHead.next
result = ListNode(None)
currentResult = result
currentHead = head
#Only maintain those element which has frequency 1 in the linked list:
while currentHead:
if freqMap[currentHead.val] == 1:
currentResult.next = ListNode(currentHead.val)
currentResult = currentResult.next
currentHead = currentHead.next
#Return the head of the newly constructed linked list:
return result.next
#Different methodology using defaultdict:
def deleteDuplicatesUnsorted(self, head: ListNode) -> ListNode:
dicts = collections.defaultdict(int)
currHead = head
while currHead:
dicts[currHead.val] += 1
currHead = currHead.next
        dummyNode = ListNode(None)
        dummyNode.next = head  # fix: anchor the dummy *before* head (was: dummyNode = head, which skipped the real head)
previousNode = dummyNode
while head:
if dicts[head.val] > 1:
previousNode.next = head.next
else:
previousNode = previousNode.next
head = head.next
return dummyNode.next
# Stray study notes preserved from the original file, commented out so the
# module stays valid Python:
# reducing size headaches: why use stride of 1 in CONV?
# Why use padding?
# Compromising based on memory constraints?
# Bi-directional LSTM -> LSTM:
# Compare with the logistic regression ->
# Took a calculated risk
# Worked beyond your responsibility?
# Talk about a true failure?
| [
"[email protected]"
] | |
e38ce9a9d274b921ae53a30071c5f87e314be6a9 | 67487a6df520e3ddbd918fdf4b9c8dcd4c783147 | /PythonExercicios/Mundo 3/17_modulos_e_pacotes/ex111/teste.py | a08e94b1c22d868580dc033e894c3bfcd761062e | [
"MIT"
] | permissive | GuilhermoCampos/Curso-Python3-curso-em-video | c7d7a25f87f5a8fd706f34030282f12b1dc88e01 | 723767bc6069e9c1fa9e28fe412e694f9eb8d05e | refs/heads/master | 2021-05-18T22:33:48.062467 | 2020-09-20T10:52:40 | 2020-09-20T10:52:40 | 251,457,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # Create a package called utilidadesCeV that has two
# internal modules called moeda and dado.
# Move the functions used in challenges 107, 108 and 109
# into the first package and keep everything working.
from utilidadescev import moeda
# Main program
num = float(input('Enter a value: '))
moeda.resumo(num, 80, 35)
| [
"[email protected]"
] | |
352740fa47b921639904c9a26c20c6b293eda782 | 61357ffa1e3b1174864a52da12c23500cfbc462a | /chatter.py | 02689dcecbb652d21784630c84d74085dc2c3d74 | [] | no_license | alexcg1/wechat-tools | 6968229a6c3440552b5612f9f9402bd3e613dc46 | 0709dedec7127a78a21eab2d56c49958aedd199f | refs/heads/master | 2022-12-13T12:00:43.164734 | 2018-10-17T04:06:14 | 2018-10-17T04:06:14 | 149,095,345 | 0 | 0 | null | 2022-12-08T02:56:23 | 2018-09-17T08:45:44 | Python | UTF-8 | Python | false | false | 5,843 | py | #!/usr/bin/env python3
import pyperclip
import readline
import threading
import time
import sys
import os
import itchat
from datetime import datetime
from itchat.content import *
from wem_functions import *
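# wem_functions (local helper module) is assumed to provide the App, Account,
# Message, Contact, File and Err objects used throughout this script.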
from pprint import pprint
from colorama import Fore, Back, Style
global chat_partner
App.start()
global output_widget
# download_dir = os.getenv("HOME")+'/Downloads/wechat'
# download_dir = File.download_dir
command_char = "\\"
to_me, from_me = Fore.GREEN, Fore.BLUE
unstyle = Style.RESET_ALL
Account.login()
friends = Account.friends_and_rooms()
me = friends[0]
print("Using WeChat as"+from_me,me['NickName'],me['UserName']+unstyle)
print("Files will be downloaded to",File.download_dir)
# @itchat.msg_register([TEXT, MAP, CARD, NOTE, SHARING, PICTURE, RECORDING, ATTACHMENT, VIDEO])
# def msg_show(msg):
# print(datetime.now().strftime('%H:%M:%S'+": "), end="", flush=True)
# msg = Contact.give_name(msg) # Give a contact name to the message
# # pprint(msg)
# # Dig out who message was from and assign their human-readable name
# FromUser = {}
# FromUser['UserName'] = msg['FromUserName']
# for i in friends:
# if i['UserName'] == msg['FromUserName']:
# FromUser['Name'] = i['Name']
# # What to do for different message types
# if msg.type == "Text":
# print(to_me+FromUser['Name']+": "+unstyle+msg.text)
# Message.notify(FromUser['Name'],msg.text)
# elif msg.type in ['Attachment', 'Picture', 'Video']:
# download_files(msg, download_dir)
# print(to_me+FromUser['Name']+": "+unstyle+" ["+msg['FileName']+"] "+Style.DIM + "downloaded to "+download_dir+Style.RESET_ALL)
# global last_file
# last_file = msg['FileName']
# MessageStuff['FileName'] = msg['FileName']
# Message.notify(FromUser['Name'],"[File] "+msg['FileName'])
# else:
# print(to_me+FromUser['Name']+": "+unstyle+" ["+msg.type+"]")
@itchat.msg_register([TEXT, MAP, CARD, NOTE, SHARING, PICTURE, RECORDING, ATTACHMENT, VIDEO])
def msg_show(msg):
FromUser = Message.get_sender_human(msg, friends)
Message.indicator = Message.date_human+": "+to_me+FromUser['Name']+": "+unstyle
msg = Contact.give_name(msg) # Give a contact name to the message
msg_show.msg_text = msg.text
msg_show.from_name = FromUser['Name']
msg_show.from_username = msg.FromUserName
msg_show.from_username_stack = []
if msg.FromUserName != me.UserName:
msg_show.from_username_stack.append(msg.FromUserName)
if msg.type == "Text":
Message.display_text = Message.indicator+msg.text
Message.notification_text = msg.text
elif msg.type in ['Attachment', 'Picture', 'Video']:
download_files(msg, File.download_dir)
Message.display_text = Message.indicator+" ["+msg.FileName+"] "+Style.DIM + "downloaded to "+File.download_dir+unstyle
Message.notification_text = msg.FileName
msg_show.filename = msg.FileName
if msg.type in ['Picture', 'Video']:
print(Err.vidpic_issue)
else:
Message.display_text = Message.indicator+"["+msg.type+"]"
Message.notification_text = msg.type
# print(Message.separator)
print(Message.display_text)
try:
if msg_show.from_username_stack[-1] != msg_show.from_username_stack[-2]:
print(Message.separator) # Print separator if sender is different to last sender
except:
pass
Message.notify(FromUser['Name'], Message.notification_text)
# @itchat.msg_register([ATTACHMENT, PICTURE, VIDEO])
# def msg_show(msg):
# msg = Contact.give_name(msg) # Give a contact name to the message
# FromUser = Message.get_sender_human(msg, friends)
# if msg.type =
# download_files(msg, download_dir)
# print(to_me+FromUser['Name']+": "+unstyle+" ["+msg['FileName']+"] "+Style.DIM + "downloaded to "+download_dir+Style.RESET_ALL)
# Message.notify(FromUser['Name'],"[File] "+msg['FileName'])
# msg_show.msg_text = msg.text
# msg_show.from_name = FromUser['Name']
# msg_show.from_username = msg.FromUserName
# if msg.type in ['Picture', 'Attachment', 'Video']:
# msg_show.filename = msg.FileName
# @itchat.msg_register([MAP, CARD, NOTE, SHARING, RECORDING])
# def msg_show(msg):
def msg_receiver():
App.listen()
# now threading1 runs regardless of user input
threading1 = threading.Thread(target=msg_receiver)
threading1.daemon = True
threading1.start()
while True:
if 'user_to_contact' in locals():
prompt = input(from_me+"To: "+user_to_contact[1]+": "+unstyle)
else:
prompt = input("> ")
if prompt.startswith(command_char):
command = prompt[1:] # Cuts off the command char, to give us raw command text
if command in ["quit", "q"]:
App.quit()
elif command == "open":
File.open(msg.FileName)
elif command in ["pp", "paste"]:
if 'recipient' in locals():
Message.paste(pyperclip.paste(), recipient)
else:
print(Err.no_recipient)
elif command in ['yy', 'copy']:
Message.copy(msg_show.msg_text)
elif command.startswith("contact "):
arg = prompt[9:]
user_to_contact = Contact.chooser(arg, friends)
if user_to_contact != None:
recipient = user_to_contact[0]
else:
del user_to_contact
elif command in ['translate', 'tr']:
Message.translate(msg_show.msg_text)
elif command == "stack":
print(msg_show.from_username_stack)
# Debug.userstack()
elif command.startswith("send "):
filename = prompt[6:]
try:
File.send(filename, recipient)
except:
print(Err.no_recipient)
else:
print(Err.unrecognized_command)
# Now, if there's no command, send a message to selected recipient
else:
if 'recipient' in locals():
message = itchat.send_msg(msg=prompt, toUserName=recipient)
            if message['BaseResponse']['RawMsg'] != "请求成功":  # "请求成功" = "request successful"
print("Message failed with error:",message['BaseResponse']['ErrMsg'])
# print(me['NickName']+": "+prompt) # For now we still see the prompt above where I typed the message, so disabling for now
else:
print(Err.no_recipient) | [
"[email protected]"
] | |
03f1ca426fc4cd4120dc4c58bf06a5217f7229ac | 282b1d7bce60803580f9e0e234606dfc69a3be9e | /cougar/graphs/loss/triplet_loss.py | 99ca2e66090e3da78d7b8a820ed097ac7837906d | [
"MIT"
] | permissive | Swall0w/cougar | 4b2894089ac4857dcd319e388077b7330c228512 | 9161b2b1d0c256f4bb952ec190351684f28ec1b7 | refs/heads/master | 2022-11-01T14:57:18.077650 | 2019-09-17T14:30:56 | 2019-09-17T14:30:56 | 205,391,913 | 1 | 0 | MIT | 2022-10-18T19:27:35 | 2019-08-30T13:51:48 | Python | UTF-8 | Python | false | false | 771 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class TripletLoss(nn.Module):
"""
Triplet loss
Takes embeddings of an anchor sample, a positive sample and a negative sample
"""
def __init__(self, margin):
super(TripletLoss, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative, size_average=True):
distance_positive = F.cosine_similarity(anchor, positive) #Each is batch X 512
distance_negative = F.cosine_similarity(anchor, negative) # .pow(.5)
        losses = (1 - distance_positive)**2 + (0 - distance_negative)**2  # Margin not used in cosine case.
if size_average:
return losses.mean()
else:
return losses.sum()
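if __name__ == "__main__":
    # Illustrative smoke test; the (batch, dim) shapes are arbitrary assumptions.
    torch.manual_seed(0)
    anchor, positive, negative = (torch.randn(8, 512) for _ in range(3))
    print(TripletLoss(margin=0.2)(anchor, positive, negative))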
| [
"[email protected]"
] | |
68915f0176b12bb9db7d9f9e48a878345db3dade | 1f96392824039390531a17b43156b2a26a956d4e | /treeserver/manage.py | 02da027d3b243dbf10e99cfffe063c9e7598ae1e | [] | no_license | benthomasson/tree | 9eca1e61406effa95b3d707fde44fe0f309180f6 | 4ccf7fa47e614f1a263a312f477c08af5f2f8d0b | refs/heads/master | 2020-04-10T22:46:04.574886 | 2012-12-22T22:11:05 | 2012-12-22T22:11:05 | 5,841,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "treeserver.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
a09540e730eb83a0c3a0246cfa219f1c85e7a390 | 7daab7f2e91d62ba0383fa050f3dea1dc9752975 | /iniciante/1005_media_1.py | 2bb3032ceadcdd803fe1baa29f46f26b6c0055c2 | [] | no_license | luandadantas/URI-Python | 97ccdaa3835b2d2fa403f148969ca7e893d3f119 | 2cb67f39725b20e6fcbbeaf27d04c4ba05dba665 | refs/heads/master | 2022-12-04T02:51:14.374361 | 2020-08-14T17:59:58 | 2020-08-14T17:59:58 | 255,736,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | A = float(input())
B = float(input())
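# URI 1005 (Average 1): weighted average of the two inputs, weights 3.5 and 7.5.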
MEDIA = (A * 3.5 + B * 7.5) / 11
print("MEDIA = {:.5f}".format(MEDIA)) | [
"[email protected]"
] | |
77c039872c643f8c0558f8ab043ab89b3592b680 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/workloads/azext_workloads/aaz/latest/workloads/sap_database_instance/_stop.py | d2c2f9063b3cc24eb222e50a23af4c4ccc2e0eb5 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 12,353 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"workloads sap-database-instance stop",
is_preview=True,
)
class Stop(AAZCommand):
"""Stops the database instance of the SAP system.
:example: Stop Database instance of the SAP system: Currently stop action is supported for SAP HANA Database only.
az workloads sap-database-instance stop --sap-virtual-instance-name <VIS Name> -g <Resource-group-name> -n <ResourceName>
:example: Stop Database instance of the SAP system using the Azure resource ID of the instance: Currently stop action is supported for SAP HANA Database only.
az workloads sap-database-instance stop --id <ResourceID>
"""
_aaz_info = {
"version": "2023-04-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.workloads/sapvirtualinstances/{}/databaseinstances/{}/stop", "2023-04-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.database_instance_name = AAZStrArg(
options=["-n", "--name", "--database-instance-name"],
help="Database resource name string modeled as parameter for auto generation to work correctly.",
required=True,
id_part="child_name_1",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.sap_virtual_instance_name = AAZStrArg(
options=["--vis-name", "--sap-virtual-instance-name"],
help="The name of the Virtual Instances for SAP solutions resource",
required=True,
id_part="name",
)
# define Arg Group "Body"
_args_schema = cls._args_schema
_args_schema.soft_stop_timeout_seconds = AAZIntArg(
options=["--soft-stop-timeout-seconds"],
arg_group="Body",
help="This parameter defines how long (in seconds) the soft shutdown waits until the RFC/HTTP clients no longer consider the server for calls with load balancing. Value 0 means that the kernel does not wait, but goes directly into the next shutdown state, i.e. hard stop.",
default=0,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.SAPDatabaseInstancesStopInstance(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class SAPDatabaseInstancesStopInstance(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Workloads/sapVirtualInstances/{sapVirtualInstanceName}/databaseInstances/{databaseInstanceName}/stop",
**self.url_parameters
)
@property
def method(self):
return "POST"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"databaseInstanceName", self.ctx.args.database_instance_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"sapVirtualInstanceName", self.ctx.args.sap_virtual_instance_name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-04-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Content-Type", "application/json",
),
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
@property
def content(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"client_flatten": True}}
)
_builder.set_prop("softStopTimeoutSeconds", AAZIntType, ".soft_stop_timeout_seconds")
return self.serialize_content(_content_value)
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_StopHelper._build_schema_operation_status_result_read(cls._schema_on_200)
return cls._schema_on_200
class _StopHelper:
"""Helper class for Stop"""
_schema_error_detail_read = None
@classmethod
def _build_schema_error_detail_read(cls, _schema):
if cls._schema_error_detail_read is not None:
_schema.additional_info = cls._schema_error_detail_read.additional_info
_schema.code = cls._schema_error_detail_read.code
_schema.details = cls._schema_error_detail_read.details
_schema.message = cls._schema_error_detail_read.message
_schema.target = cls._schema_error_detail_read.target
return
cls._schema_error_detail_read = _schema_error_detail_read = AAZObjectType()
error_detail_read = _schema_error_detail_read
error_detail_read.additional_info = AAZListType(
serialized_name="additionalInfo",
flags={"read_only": True},
)
error_detail_read.code = AAZStrType(
flags={"read_only": True},
)
error_detail_read.details = AAZListType(
flags={"read_only": True},
)
error_detail_read.message = AAZStrType(
flags={"read_only": True},
)
error_detail_read.target = AAZStrType(
flags={"read_only": True},
)
additional_info = _schema_error_detail_read.additional_info
additional_info.Element = AAZObjectType()
_element = _schema_error_detail_read.additional_info.Element
_element.type = AAZStrType(
flags={"read_only": True},
)
details = _schema_error_detail_read.details
details.Element = AAZObjectType()
cls._build_schema_error_detail_read(details.Element)
_schema.additional_info = cls._schema_error_detail_read.additional_info
_schema.code = cls._schema_error_detail_read.code
_schema.details = cls._schema_error_detail_read.details
_schema.message = cls._schema_error_detail_read.message
_schema.target = cls._schema_error_detail_read.target
_schema_operation_status_result_read = None
@classmethod
def _build_schema_operation_status_result_read(cls, _schema):
if cls._schema_operation_status_result_read is not None:
_schema.end_time = cls._schema_operation_status_result_read.end_time
_schema.error = cls._schema_operation_status_result_read.error
_schema.id = cls._schema_operation_status_result_read.id
_schema.name = cls._schema_operation_status_result_read.name
_schema.operations = cls._schema_operation_status_result_read.operations
_schema.percent_complete = cls._schema_operation_status_result_read.percent_complete
_schema.start_time = cls._schema_operation_status_result_read.start_time
_schema.status = cls._schema_operation_status_result_read.status
return
cls._schema_operation_status_result_read = _schema_operation_status_result_read = AAZObjectType()
operation_status_result_read = _schema_operation_status_result_read
operation_status_result_read.end_time = AAZStrType(
serialized_name="endTime",
)
operation_status_result_read.error = AAZObjectType()
cls._build_schema_error_detail_read(operation_status_result_read.error)
operation_status_result_read.id = AAZStrType()
operation_status_result_read.name = AAZStrType()
operation_status_result_read.operations = AAZListType()
operation_status_result_read.percent_complete = AAZFloatType(
serialized_name="percentComplete",
)
operation_status_result_read.start_time = AAZStrType(
serialized_name="startTime",
)
operation_status_result_read.status = AAZStrType(
flags={"required": True},
)
operations = _schema_operation_status_result_read.operations
operations.Element = AAZObjectType()
cls._build_schema_operation_status_result_read(operations.Element)
_schema.end_time = cls._schema_operation_status_result_read.end_time
_schema.error = cls._schema_operation_status_result_read.error
_schema.id = cls._schema_operation_status_result_read.id
_schema.name = cls._schema_operation_status_result_read.name
_schema.operations = cls._schema_operation_status_result_read.operations
_schema.percent_complete = cls._schema_operation_status_result_read.percent_complete
_schema.start_time = cls._schema_operation_status_result_read.start_time
_schema.status = cls._schema_operation_status_result_read.status
__all__ = ["Stop"]
| [
"[email protected]"
] | |
00b77addd4d58bcfcf481433d1298aaedd8b394f | bbd4d550b5f60763872b2ecd58c5b2bde21ecd0b | /test2/file_workflow.py | 7b5ed83826e9c4a2c7b196c1f39780e76410aa0d | [] | no_license | MaLei666/workflow_test | 4894a96f1b9858e6beb454f62cb6acad34e8fc3e | 312d23e36086bb0967f20b290a3f9b1f5f6be184 | refs/heads/master | 2022-07-31T00:13:46.307736 | 2020-05-24T14:32:18 | 2020-05-24T14:32:18 | 266,557,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,510 | py | # #-*- coding:utf-8 -*-
# # @author : MaLei
# # @datetime : 2020/4/21 7:22 PM
# # @file : file_workflow.py
# # @software : PyCharm
#
# from SpiffWorkflow.specs import WorkflowSpec,ExclusiveChoice,Simple,Cancel
# from SpiffWorkflow.serializer.json import JSONSerializer
# from SpiffWorkflow.operators import Equal,Attrib
# from SpiffWorkflow import Workflow
# import json
# def file_open(msg):
# print("file:",msg)
#
#
# class DoubleCheck(WorkflowSpec):
# '''1. If a subclass does not override __init__, instantiating the subclass automatically calls the __init__ defined by the parent class.
#
# 2. If a subclass overrides __init__, instantiating the subclass will not call the parent's already-defined __init__.
#
# 3. To use or extend the parent's behavior, call the parent's __init__ explicitly, in one of two ways: 1) call the unbound parent constructor, or 2) use super().'''
# def __init__(self):
# WorkflowSpec.__init__(self) # call the unbound superclass constructor (it must be called explicitly, otherwise the parent constructor never runs)
# people1_choice=ExclusiveChoice(self,'people1') # define an exclusive-choice task
# self.start.connect(people1_choice) # self.start is the StartTask placed at the beginning of the workflow: it takes no input and has at least one output
# cancel=Cancel(self,'workflow_aborted') # define cancelling the workflow
# people1_choice.connect(cancel)
# people2_choice=ExclusiveChoice(self,'people2')
# cond=Equal(Attrib('confirmation'),'yes') # Equal operator; Attrib marks a value so that valueof() can resolve it as an attribute value
# people1_choice.connect_if(cond,people2_choice) # if the condition matches, connect and run this task spec
# people2_choice.connect(cancel) # the task spec to run when no other condition matches
# open=Simple(self,'file_open')
# people2_choice.connect_if(cond,open)
# open.completed_event.connect(file_open)
#
# # spec=DoubleCheck()
# #
# # serializer=JSONSerializer()
# # """
# # The engine that executes the workflow.
# #
# # It is essentially a tool that manages all the branches.
# #
# # The Workflow object is also where the running workflow's data lives.
# # """
# # workflow=Workflow(spec)
# # data=workflow.serialize(serializer)
# # pretty=json.dumps(json.loads(data),indent=4,separators=(',',':'))
# # open('workflow-spec.json','w').write(pretty)
#
# serializer = JSONSerializer()
# with open('workflow-spec.json') as fp:
# workflow_json = fp.read()
# spec = WorkflowSpec.deserialize(serializer, workflow_json)
# open('workflow-spec.py','w').write(spec)
from __future__ import print_function
from SpiffWorkflow.specs import WorkflowSpec, ExclusiveChoice, Simple, Cancel
from SpiffWorkflow.operators import Equal, Attrib
def my_nuclear_strike(msg):
print("Launched:", msg)
class DoubleDeckBox(WorkflowSpec):
def __init__(self):
WorkflowSpec.__init__(self)
peopleA_choice = ExclusiveChoice(self, 'peopleA')
self.start.connect(peopleA_choice)
cancel = Cancel(self, 'workflow_aborted')
peopleA_choice.connect(cancel)
peopleB_choice = ExclusiveChoice(self, 'peopleB')
cond = Equal(Attrib('confirmation'), 'yes')
peopleA_choice.connect_if(cond, peopleB_choice)
peopleB_choice.connect(cancel)
strike = Simple(self, 'nuclear_strike')
peopleB_choice.connect_if(cond, strike)
strike.completed_event.connect(my_nuclear_strike)
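# Hedged usage sketch -- SpiffWorkflow's execution API has shifted between
# releases, so treat this as indicative only:
#
#     from SpiffWorkflow import Workflow
#     wf = Workflow(DoubleDeckBox())
#     wf.complete_all()  # runs tasks until the choices consume 'confirmation'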
| [
"[email protected]"
] | |
7b213d9375e083dc0cfcf3d970c4b04d5b6fc55d | d9d75e429261d3f2b1075e0f7b598c756cb53a56 | /env/lib/python3.9/site-packages/django/db/migrations/state.py | 9432b726363321672c239d106b0d143a1bffa281 | [] | no_license | elisarocha/django-tdd-course | f89ccff07c8fd5114ae10fcb9a400be562f088db | 7333fb9b5a5f8ea0f8fe7367234de21df589be59 | refs/heads/main | 2023-02-25T11:22:23.737594 | 2021-02-01T19:53:42 | 2021-02-01T19:53:42 | 334,710,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,307 | py | import copy
from contextlib import contextmanager
from django.apps import AppConfig
from django.apps.registry import Apps
from django.apps.registry import apps as global_apps
from django.conf import settings
from django.db import models
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.utils import make_model_tuple
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version
from .exceptions import InvalidBasesError
def _get_app_label_and_model_name(model, app_label=''):
if isinstance(model, str):
split = model.split('.', 1)
return tuple(split) if len(split) == 2 else (app_label, split[0])
else:
return model._meta.app_label, model._meta.model_name
def _get_related_models(m):
"""Return all models that have a direct relationship to the given model."""
related_models = [
subclass for subclass in m.__subclasses__()
if issubclass(subclass, models.Model)
]
related_fields_models = set()
for f in m._meta.get_fields(include_parents=True, include_hidden=True):
if f.is_relation and f.related_model is not None and not isinstance(f.related_model, str):
related_fields_models.add(f.model)
related_models.append(f.related_model)
# Reverse accessors of foreign keys to proxy models are attached to their
# concrete proxied model.
opts = m._meta
if opts.proxy and m in related_fields_models:
related_models.append(opts.concrete_model)
return related_models
def get_related_models_tuples(model):
"""
Return a list of typical (app_label, model_name) tuples for all related
models for the given model.
"""
return {
(rel_mod._meta.app_label, rel_mod._meta.model_name)
for rel_mod in _get_related_models(model)
}
def get_related_models_recursive(model):
"""
Return all models that have a direct or indirect relationship
to the given model.
Relationships are either defined by explicit relational fields, like
ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another
model (a superclass is related to its subclasses, but not vice versa). Note,
however, that a model inheriting from a concrete model is also related to
its superclass through the implicit *_ptr OneToOneField on the subclass.
"""
seen = set()
queue = _get_related_models(model)
for rel_mod in queue:
rel_app_label, rel_model_name = rel_mod._meta.app_label, rel_mod._meta.model_name
if (rel_app_label, rel_model_name) in seen:
continue
seen.add((rel_app_label, rel_model_name))
queue.extend(_get_related_models(rel_mod))
return seen - {(model._meta.app_label, model._meta.model_name)}
class ProjectState:
"""
Represent the entire project's overall state. This is the item that is
passed around - do it here rather than at the app level so that cross-app
FKs/etc. resolve properly.
"""
def __init__(self, models=None, real_apps=None):
self.models = models or {}
# Apps to include from main registry, usually unmigrated ones
self.real_apps = real_apps or []
self.is_delayed = False
def add_model(self, model_state):
app_label, model_name = model_state.app_label, model_state.name_lower
self.models[(app_label, model_name)] = model_state
if 'apps' in self.__dict__: # hasattr would cache the property
self.reload_model(app_label, model_name)
def remove_model(self, app_label, model_name):
del self.models[app_label, model_name]
if 'apps' in self.__dict__: # hasattr would cache the property
self.apps.unregister_model(app_label, model_name)
# Need to do this explicitly since unregister_model() doesn't clear
# the cache automatically (#24513)
self.apps.clear_cache()
def _find_reload_model(self, app_label, model_name, delay=False):
if delay:
self.is_delayed = True
related_models = set()
try:
old_model = self.apps.get_model(app_label, model_name)
except LookupError:
pass
else:
# Get all relations to and from the old model before reloading,
# as _meta.apps may change
if delay:
related_models = get_related_models_tuples(old_model)
else:
related_models = get_related_models_recursive(old_model)
# Get all outgoing references from the model to be rendered
model_state = self.models[(app_label, model_name)]
# Directly related models are the models pointed to by ForeignKeys,
# OneToOneFields, and ManyToManyFields.
direct_related_models = set()
for field in model_state.fields.values():
if field.is_relation:
if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT:
continue
rel_app_label, rel_model_name = _get_app_label_and_model_name(field.related_model, app_label)
direct_related_models.add((rel_app_label, rel_model_name.lower()))
# For all direct related models recursively get all related models.
related_models.update(direct_related_models)
for rel_app_label, rel_model_name in direct_related_models:
try:
rel_model = self.apps.get_model(rel_app_label, rel_model_name)
except LookupError:
pass
else:
if delay:
related_models.update(get_related_models_tuples(rel_model))
else:
related_models.update(get_related_models_recursive(rel_model))
# Include the model itself
related_models.add((app_label, model_name))
return related_models
def reload_model(self, app_label, model_name, delay=False):
if 'apps' in self.__dict__: # hasattr would cache the property
related_models = self._find_reload_model(app_label, model_name, delay)
self._reload(related_models)
def reload_models(self, models, delay=True):
if 'apps' in self.__dict__: # hasattr would cache the property
related_models = set()
for app_label, model_name in models:
related_models.update(self._find_reload_model(app_label, model_name, delay))
self._reload(related_models)
def _reload(self, related_models):
# Unregister all related models
with self.apps.bulk_update():
for rel_app_label, rel_model_name in related_models:
self.apps.unregister_model(rel_app_label, rel_model_name)
states_to_be_rendered = []
# Gather all models states of those models that will be rerendered.
# This includes:
# 1. All related models of unmigrated apps
for model_state in self.apps.real_models:
if (model_state.app_label, model_state.name_lower) in related_models:
states_to_be_rendered.append(model_state)
# 2. All related models of migrated apps
for rel_app_label, rel_model_name in related_models:
try:
model_state = self.models[rel_app_label, rel_model_name]
except KeyError:
pass
else:
states_to_be_rendered.append(model_state)
# Render all models
self.apps.render_multiple(states_to_be_rendered)
def clone(self):
"""Return an exact copy of this ProjectState."""
new_state = ProjectState(
models={k: v.clone() for k, v in self.models.items()},
real_apps=self.real_apps,
)
if 'apps' in self.__dict__:
new_state.apps = self.apps.clone()
new_state.is_delayed = self.is_delayed
return new_state
def clear_delayed_apps_cache(self):
if self.is_delayed and 'apps' in self.__dict__:
del self.__dict__['apps']
@cached_property
def apps(self):
return StateApps(self.real_apps, self.models)
@property
def concrete_apps(self):
self.apps = StateApps(self.real_apps, self.models, ignore_swappable=True)
return self.apps
@classmethod
def from_apps(cls, apps):
"""Take an Apps and return a ProjectState matching it."""
app_models = {}
for model in apps.get_models(include_swapped=True):
model_state = ModelState.from_model(model)
app_models[(model_state.app_label, model_state.name_lower)] = model_state
return cls(app_models)
def __eq__(self, other):
return self.models == other.models and set(self.real_apps) == set(other.real_apps)
class AppConfigStub(AppConfig):
"""Stub of an AppConfig. Only provides a label and a dict of models."""
# Not used, but required by AppConfig.__init__
path = ''
def __init__(self, label):
self.label = label
# App-label and app-name are not the same thing, so technically passing
# in the label here is wrong. In practice, migrations don't care about
# the app name, but we need something unique, and the label works fine.
super().__init__(label, None)
def import_models(self):
self.models = self.apps.all_models[self.label]
class StateApps(Apps):
"""
Subclass of the global Apps registry class to better handle dynamic model
additions and removals.
"""
def __init__(self, real_apps, models, ignore_swappable=False):
# Any apps in self.real_apps should have all their models included
# in the render. We don't use the original model instances as there
# are some variables that refer to the Apps object.
# FKs/M2Ms from real apps are also not included as they just
# mess things up with partial states (due to lack of dependencies)
self.real_models = []
for app_label in real_apps:
app = global_apps.get_app_config(app_label)
for model in app.get_models():
self.real_models.append(ModelState.from_model(model, exclude_rels=True))
# Populate the app registry with a stub for each application.
app_labels = {model_state.app_label for model_state in models.values()}
app_configs = [AppConfigStub(label) for label in sorted([*real_apps, *app_labels])]
super().__init__(app_configs)
# These locks get in the way of copying as implemented in clone(),
# which is called whenever Django duplicates a StateApps before
# updating it.
self._lock = None
self.ready_event = None
self.render_multiple([*models.values(), *self.real_models])
# There shouldn't be any operations pending at this point.
from django.core.checks.model_checks import _check_lazy_references
ignore = {make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set()
errors = _check_lazy_references(self, ignore=ignore)
if errors:
raise ValueError("\n".join(error.msg for error in errors))
@contextmanager
def bulk_update(self):
# Avoid clearing each model's cache for each change. Instead, clear
# all caches when we're finished updating the model instances.
ready = self.ready
self.ready = False
try:
yield
finally:
self.ready = ready
self.clear_cache()
def render_multiple(self, model_states):
# We keep trying to render the models in a loop, ignoring invalid
# base errors, until the size of the unrendered models doesn't
# decrease by at least one, meaning there's a base dependency loop/
# missing base.
if not model_states:
return
# Prevent that all model caches are expired for each render.
with self.bulk_update():
unrendered_models = model_states
while unrendered_models:
new_unrendered_models = []
for model in unrendered_models:
try:
model.render(self)
except InvalidBasesError:
new_unrendered_models.append(model)
if len(new_unrendered_models) == len(unrendered_models):
raise InvalidBasesError(
"Cannot resolve bases for %r\nThis can happen if you are inheriting models from an "
"app with migrations (e.g. contrib.auth)\n in an app with no migrations; see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies "
"for more" % (new_unrendered_models, get_docs_version())
)
unrendered_models = new_unrendered_models
def clone(self):
"""Return a clone of this registry."""
clone = StateApps([], {})
clone.all_models = copy.deepcopy(self.all_models)
clone.app_configs = copy.deepcopy(self.app_configs)
# Set the pointer to the correct app registry.
for app_config in clone.app_configs.values():
app_config.apps = clone
# No need to actually clone them, they'll never change
clone.real_models = self.real_models
return clone
def register_model(self, app_label, model):
self.all_models[app_label][model._meta.model_name] = model
if app_label not in self.app_configs:
self.app_configs[app_label] = AppConfigStub(app_label)
self.app_configs[app_label].apps = self
self.app_configs[app_label].models = {}
self.app_configs[app_label].models[model._meta.model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def unregister_model(self, app_label, model_name):
try:
del self.all_models[app_label][model_name]
del self.app_configs[app_label].models[model_name]
except KeyError:
pass
class ModelState:
"""
Represent a Django Model. Don't use the actual Model class as it's not
designed to have its options changed - instead, mutate this one and then
render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
assign new ones, as these are not detached during a clone.
"""
def __init__(self, app_label, name, fields, options=None, bases=None, managers=None):
self.app_label = app_label
self.name = name
self.fields = dict(fields)
self.options = options or {}
self.options.setdefault('indexes', [])
self.options.setdefault('constraints', [])
self.bases = bases or (models.Model,)
self.managers = managers or []
for name, field in self.fields.items():
# Sanity-check that fields are NOT already bound to a model.
if hasattr(field, 'model'):
raise ValueError(
'ModelState.fields cannot be bound to a model - "%s" is.' % name
)
# Sanity-check that relation fields are NOT referring to a model class.
if field.is_relation and hasattr(field.related_model, '_meta'):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.to" does. '
'Use a string reference instead.' % name
)
if field.many_to_many and hasattr(field.remote_field.through, '_meta'):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.through" does. '
'Use a string reference instead.' % name
)
# Sanity-check that indexes have their name set.
for index in self.options['indexes']:
if not index.name:
raise ValueError(
"Indexes passed to ModelState require a name attribute. "
"%r doesn't have one." % index
)
@cached_property
def name_lower(self):
return self.name.lower()
@classmethod
def from_model(cls, model, exclude_rels=False):
"""Given a model, return a ModelState representing it."""
# Deconstruct the fields
fields = []
for field in model._meta.local_fields:
if getattr(field, "remote_field", None) and exclude_rels:
continue
if isinstance(field, models.OrderWrt):
continue
name = field.name
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError("Couldn't reconstruct field %s on %s: %s" % (
name,
model._meta.label,
e,
))
if not exclude_rels:
for field in model._meta.local_many_to_many:
name = field.name
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
name,
model._meta.object_name,
e,
))
# Extract the options
options = {}
for name in DEFAULT_NAMES:
# Ignore some special options
if name in ["apps", "app_label"]:
continue
elif name in model._meta.original_attrs:
if name == "unique_together":
ut = model._meta.original_attrs["unique_together"]
options[name] = set(normalize_together(ut))
elif name == "index_together":
it = model._meta.original_attrs["index_together"]
options[name] = set(normalize_together(it))
elif name == "indexes":
indexes = [idx.clone() for idx in model._meta.indexes]
for index in indexes:
if not index.name:
index.set_name_with_model(model)
options['indexes'] = indexes
elif name == 'constraints':
options['constraints'] = [con.clone() for con in model._meta.constraints]
else:
options[name] = model._meta.original_attrs[name]
# If we're ignoring relationships, remove all field-listing model
# options (that option basically just means "make a stub model")
if exclude_rels:
for key in ["unique_together", "index_together", "order_with_respect_to"]:
if key in options:
del options[key]
# Private fields are ignored, so remove options that refer to them.
elif options.get('order_with_respect_to') in {field.name for field in model._meta.private_fields}:
del options['order_with_respect_to']
def flatten_bases(model):
bases = []
for base in model.__bases__:
if hasattr(base, "_meta") and base._meta.abstract:
bases.extend(flatten_bases(base))
else:
bases.append(base)
return bases
# We can't rely on __mro__ directly because we only want to flatten
# abstract models and not the whole tree. However by recursing on
# __bases__ we may end up with duplicates and ordering issues, we
# therefore discard any duplicates and reorder the bases according
# to their index in the MRO.
flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))
# Make our record
bases = tuple(
(
base._meta.label_lower
if hasattr(base, "_meta") else
base
)
for base in flattened_bases
)
# Ensure at least one base inherits from models.Model
if not any((isinstance(base, str) or issubclass(base, models.Model)) for base in bases):
bases = (models.Model,)
managers = []
manager_names = set()
default_manager_shim = None
for manager in model._meta.managers:
if manager.name in manager_names:
# Skip overridden managers.
continue
elif manager.use_in_migrations:
# Copy managers usable in migrations.
new_manager = copy.copy(manager)
new_manager._set_creation_counter()
elif manager is model._base_manager or manager is model._default_manager:
# Shim custom managers used as default and base managers.
new_manager = models.Manager()
new_manager.model = manager.model
new_manager.name = manager.name
if manager is model._default_manager:
default_manager_shim = new_manager
else:
continue
manager_names.add(manager.name)
managers.append((manager.name, new_manager))
# Ignore a shimmed default manager called objects if it's the only one.
if managers == [('objects', default_manager_shim)]:
managers = []
# Construct the new ModelState
return cls(
model._meta.app_label,
model._meta.object_name,
fields,
options,
bases,
managers,
)
def construct_managers(self):
"""Deep-clone the managers using deconstruction."""
# Sort all managers by their creation counter
sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
for mgr_name, manager in sorted_managers:
as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()
if as_manager:
qs_class = import_string(qs_path)
yield mgr_name, qs_class.as_manager()
else:
manager_class = import_string(manager_path)
yield mgr_name, manager_class(*args, **kwargs)
def clone(self):
"""Return an exact copy of this ModelState."""
return self.__class__(
app_label=self.app_label,
name=self.name,
fields=dict(self.fields),
# Since options are shallow-copied here, operations such as
# AddIndex must replace their option (e.g 'indexes') rather
# than mutating it.
options=dict(self.options),
bases=self.bases,
managers=list(self.managers),
)
def render(self, apps):
"""Create a Model object from our current state into the given apps."""
# First, make a Meta object
meta_contents = {'app_label': self.app_label, 'apps': apps, **self.options}
meta = type("Meta", (), meta_contents)
# Then, work out our bases
try:
bases = tuple(
(apps.get_model(base) if isinstance(base, str) else base)
for base in self.bases
)
except LookupError:
raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
# Clone fields for the body, add other bits.
body = {name: field.clone() for name, field in self.fields.items()}
body['Meta'] = meta
body['__module__'] = "__fake__"
# Restore managers
body.update(self.construct_managers())
# Then, make a Model object (apps.register_model is called in __new__)
return type(self.name, bases, body)
def get_index_by_name(self, name):
for index in self.options['indexes']:
if index.name == name:
return index
raise ValueError("No index named %s on model %s" % (name, self.name))
def get_constraint_by_name(self, name):
for constraint in self.options['constraints']:
if constraint.name == name:
return constraint
raise ValueError('No constraint named %s on model %s' % (name, self.name))
def __repr__(self):
return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name)
def __eq__(self, other):
return (
(self.app_label == other.app_label) and
(self.name == other.name) and
(len(self.fields) == len(other.fields)) and
all(
k1 == k2 and f1.deconstruct()[1:] == f2.deconstruct()[1:]
for (k1, f1), (k2, f2) in zip(
sorted(self.fields.items()),
sorted(other.fields.items()),
)
) and
(self.options == other.options) and
(self.bases == other.bases) and
(self.managers == other.managers)
)
| [
"[email protected]"
] | |
200d1b874c07190b35bb0877add010ffb64f521a | 780900f95f56d9272a01bd50f01642efa771bd16 | /scrape.py | 95fbb4de529518f0b2404683a92f94fd932e7c8b | [] | no_license | pallih/veidigjald-signatures | 107ab19153807ee45ab0cdbfe4142e5c9731a49e | 78e301e55f76c3ff5c3672a7bf21b779080864a9 | refs/heads/master | 2021-01-22T02:49:02.968416 | 2013-06-20T00:37:59 | 2013-06-20T00:37:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py | import scraperwiki
import requests
import lxml.html
url = 'http://www.petitions24.com/signatures/obreytt_veidigjald/start/%s'
headers = {'User-Agent': 'Mozilla/5.0'}
#setup at start
'''
record = {}
record['last_page'] = '0'
scraperwiki.sqlite.save(['last_page'], data=record, table_name='runtime_info')
exit()
'''
selection_statement = 'last_page from runtime_info'
last_page = int(scraperwiki.sqlite.select(selection_statement)[0]['last_page'])
s = requests.Session()
s.headers.update(headers)
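# Each listing page shows 10 signatures, so scrape(n) requests offset n*10.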
def scrape(last_page):
    print(last_page)
response = s.get(url % str(int(last_page)*10).strip())
html = response.text
root = lxml.html.fromstring(html)
signatures = root.xpath('//table[@id="signatures"]/tr')
batch = []
for signature in signatures:
data = {}
data['nr'] = signature[0].text_content().strip()
data['name'] = signature[1].text_content()
if data['name'] != 'The signatory decided not to show his/her name on the Internet.':
data['place'] = signature[2].text_content()
data['place_url'] = signature[2][0].attrib['href']
data['kt'] = signature[3].text_content()
data['date'] = signature[4].text_content()
batch.append(data)
scraperwiki.sqlite.save(['nr'], data=batch, table_name='veidigjald')
update_statement= 'update runtime_info SET last_page=' + str(last_page)
scraperwiki.sqlite.execute(update_statement)
scraperwiki.sqlite.commit()
pagination = root.xpath('//div[@class="pagination"]/a[@class="go_next"]')
if pagination:
return True
else:
return False
for x in range(last_page, 10000000):  # How crappy is this? Probably 11.
    if not scrape(x):
        break
break
| [
"[email protected]"
] | |
03151d8300aa740a122197a386f399d7e9451450 | d272b041f84bbd18fd65a48b42e0158ef6cceb20 | /catch/datasets/tho.py | 58fee5408125281b49c221f55d5e91b94fc7a0a6 | [
"MIT"
] | permissive | jahanshah/catch | bbffeadd4113251cc2b2ec9893e3d014608896ce | 2fedca15f921116f580de8b2ae7ac9972932e59e | refs/heads/master | 2023-02-19T13:30:13.677960 | 2021-01-26T03:41:10 | 2021-01-26T03:41:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | """Dataset with 'T'Ho virus' sequences.
A dataset with 1 'T'Ho virus' genome.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetSingleChrom
ds = GenomesDatasetSingleChrom(__name__, __file__, __spec__)
ds.add_fasta_path("data/tho.fasta.gz", relative=True)
sys.modules[__name__] = ds
| [
"[email protected]"
] | |
cf7c9dcfbb59460a88b0b3e568306304f7583811 | ff853d7b3773db8de783fd26a76bd92742f85384 | /0x0F-python-object_relational_mapping/2-my_filter_states.py | c480b9cf408630749b250433478da0e0b9fde823 | [] | no_license | stuartses/holbertonschool-higher_level_programming | 1b3315f624f9c2dc0c63ee3481021c5ed093a81d | 40497b632bf71c3b877cb61fce79b9d82b4519da | refs/heads/master | 2020-09-29T00:51:57.791491 | 2020-05-14T16:51:44 | 2020-05-14T16:51:44 | 226,905,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | #!/usr/bin/python3
"""2. Filter states by user input
This module make a MySQL Query using MySQLdb
Filter states with a input name
Holberton School
Foundations - Higher-level programming - Python
By Stuart Echeverry
"""
if __name__ == "__main__":
import sys
import MySQLdb
len_args = len(sys.argv) - 1
args = sys.argv
db = MySQLdb.connect(host='localhost', user=args[1], passwd=args[2],
db=args[3])
cur = db.cursor()
cur.execute("SELECT * FROM states WHERE BINARY name='{}'"
" ORDER BY id ASC".format(args[4]))
rows = cur.fetchall()
for row in rows:
print(row)
cur.close()
db.close()
| [
"[email protected]"
] | |
8ce62413b34f91d34f9f8084f3428a42d543ee3b | 037d5d18b9b81205305e158d7d9fdad131d318cb | /tests/revert/test_many_to_many_relationships.py | 973dd97bac9f110e036e38a0e9b662a28abc0024 | [] | permissive | kvesteri/sqlalchemy-continuum | ee7acf2c961b27eab3dd8f61598d9159d801ee21 | a7a6bd7952185b1f82af985c0271834d886a617c | refs/heads/master | 2023-08-24T09:14:33.515416 | 2022-11-17T05:41:09 | 2023-07-24T23:37:12 | 10,312,759 | 479 | 134 | BSD-3-Clause | 2023-09-12T20:07:04 | 2013-05-27T10:30:55 | Python | UTF-8 | Python | false | false | 2,806 | py | import sqlalchemy as sa
from tests import TestCase
class TestRevertManyToManyRelationship(TestCase):
def create_models(self):
class Article(self.Model):
__tablename__ = 'article'
__versioned__ = {
'base_classes': (self.Model, )
}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
article_tag = sa.Table(
'article_tag',
self.Model.metadata,
sa.Column(
'article_id',
sa.Integer,
sa.ForeignKey('article.id', ondelete='CASCADE'),
primary_key=True,
),
sa.Column(
'tag_id',
sa.Integer,
sa.ForeignKey('tag.id', ondelete='CASCADE'),
primary_key=True
)
)
class Tag(self.Model):
__tablename__ = 'tag'
__versioned__ = {
'base_classes': (self.Model, )
}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
Tag.articles = sa.orm.relationship(
Article,
secondary=article_tag,
backref='tags'
)
self.Article = Article
self.Tag = Tag
def test_revert_remove(self):
article = self.Article()
article.name = u'Some article'
tag = self.Tag(name=u'some tag')
article.tags.append(tag)
self.session.add(article)
self.session.commit()
assert len(article.versions[0].tags) == 1
article.tags.remove(tag)
self.session.commit()
self.session.refresh(article)
assert article.tags == []
article.versions[0].revert(relations=['tags'])
self.session.commit()
assert article.name == u'Some article'
assert len(article.tags) == 1
assert article.tags[0].name == u'some tag'
def test_revert_remove_with_multiple_parents(self):
article = self.Article(name=u'Some article')
tag = self.Tag(name=u'some tag')
article.tags.append(tag)
self.session.add(article)
article2 = self.Article(name=u'Some article')
tag2 = self.Tag(name=u'some tag')
article2.tags.append(tag2)
self.session.add(article2)
self.session.commit()
article.tags.remove(tag)
self.session.commit()
self.session.refresh(article)
assert len(article.tags) == 0
article.versions[0].revert(relations=['tags'])
self.session.commit()
assert article.name == u'Some article'
assert len(article.tags) == 1
assert article.tags[0].name == u'some tag'
| [
"[email protected]"
] | |
780ab5e21cc1064a813e791e44a1b898e0b246be | 783aa622b5435e202c157dfa5ff75af5df2fcf95 | /moodis/views.py | a98493daa6f11905f0ed8e9853b21b4b0e31ce2f | [] | no_license | adiram17/moodisproject | 0eac4c73e46fbfeeaa436128a57d6b6d5e0cba70 | 634ff76b85eddfd5dba6fc4061cfc54b5718477a | refs/heads/master | 2023-03-09T19:10:29.880559 | 2021-03-04T09:13:38 | 2021-03-04T09:13:38 | 342,731,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,724 | py | from django import forms
from .decorators import is_checked_today
from django.shortcuts import redirect, render
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .forms import ContactForm, PatientCheckForm, PatientForm, PatientViewForm, UserCreationForm
from django.contrib.auth.models import User
from .models import Patient, PatientMoodResponse, PatientMoodEpisode, Profile, Question, ResponseOption
from datetime import datetime
from .moodis_fuzzy import getMoodEpisode
def about(request):
return render(request, 'pages/about.html', {})
def contact(request):
form = ContactForm()
return render(request, 'pages/contact.html', {'form': form})
def test(request):
#messages.success(request, "Ini adalah message info")
return render(request, 'pages/test.html', {})
#TODO add email activation
def signup(request):
if request.method == 'POST':
form=UserCreationForm(request.POST)
if form.is_valid():
try:
user = User.objects.create_user(username=request.POST['username'],email=request.POST['email'], password = request.POST['password1'])
profile = user.profile
profile.full_name = request.POST['full_name']
profile.email = request.POST['email']
profile.phone = request.POST['phone']
profile.gender = request.POST['gender']
profile.age = request.POST['age']
profile.save()
                #automatically register the new user as their own patient record at sign-up
patient = Patient(
created_by=profile,
full_name=profile.full_name,
gender=profile.gender,
age=profile.age,
is_self = True
)
patient.save()
messages.success(request, "User berhasil dibuat. Silakan login dengan akun terdaftar.")
return redirect('home')
except Exception as e:
messages.error(request, "Terjadi kesalahan. "+str(e))
return render(request, "pages/signup.html", {'form':form})
else:
messages.error(request, "Terjadi kesalahan")
return render(request, "pages/signup.html", {'form':form})
else:
form = UserCreationForm
return render(request,'pages/signup.html', {'form':form})
@login_required
def home(request):
patient=Patient.objects.get(created_by=request.user.profile, is_self=True)
return render(request, 'pages/home.html', {'patient':patient})
@login_required
def patientEdit(request):
patient_id = request.GET['patient_id']
patient = Patient.objects.get(id=patient_id)
if request.method == 'POST':
patient.full_name = request.POST['full_name']
patient.gender = request.POST['gender']
patient.age = request.POST['age']
try:
patient.save()
if (patient.is_self):
profile = Profile.objects.get(id=request.user.profile.id)
profile.full_name = patient.full_name
profile.gender = patient.gender
profile.age = patient.age
profile.save()
messages.success(request, "Informasi profil berhasil diperbaharui")
return redirect('patient_list')
except Exception as e:
print("Terjadi kesalahan. "+str(e))
messages.error(request, "Terjadi kesalahan. "+str(e))
            # use a literal URL with query string, matching the style used
            # elsewhere in this module (the exact path is inferred)
            return redirect('/patient_edit/?patient_id=' + str(patient_id))
else:
form = PatientForm(
initial={
'full_name':patient.full_name,
'gender':patient.gender,
'age':patient.age,
}
)
return render(request, 'pages/patient_edit.html', {'patient_id':patient_id,'form': form})
@login_required
def patientAdd(request):
form = PatientForm()
if request.method == 'POST':
patient=Patient()
patient.full_name = request.POST['full_name']
patient.gender = request.POST['gender']
patient.age = request.POST['age']
patient.created_by = request.user.profile
patient.is_self = False
try:
patient.save()
messages.success(request, "User berhasil ditambah")
return redirect('patient_list')
except Exception as e:
print("Terjadi kesalahan. "+str(e))
messages.error(request, "Terjadi kesalahan. "+str(e))
return redirect('patient_list')
else:
return render(request, 'pages/patient_add.html', {'form': form})
@login_required
def patientList(request):
patient_self=Patient.objects.get(created_by=request.user.profile, is_self=True)
patient_others=Patient.objects.filter(created_by=request.user.profile, is_self=False)
return render(request, 'pages/patient_list.html', {'patient_self':patient_self, 'patient_others':patient_others})
@login_required
def check(request):
patient_id=request.GET['patient_id']
patient = Patient.objects.get(id=patient_id)
questions = Question.objects.filter(question_type='Questionaire 1').order_by('question_number')
if request.method == 'POST':
date = request.POST['date']
#pre generate patient mood episode
patient_mood_episode=PatientMoodEpisode(
date = date,
patient=patient,
episode_score=0,
episode_category="",
)
patient_mood_episode.save()
#save answer
for question in questions:
answer_score = request.POST['option'+str(question.question_number)]
patient_mood_response =PatientMoodResponse(
patient_mood_episode=patient_mood_episode,
question=question,
patient = patient,
answer_score= answer_score,
)
patient_mood_response.save()
#TODO add fuzzy process to update episode result based on patient answers
patient_mood_responses = PatientMoodResponse.objects.filter(patient_mood_episode=patient_mood_episode)
episode_score, episode_category = getMoodEpisode(patient_mood_responses)
#update mood episode
patient_mood_episode.episode_score=episode_score
patient_mood_episode.episode_category=episode_category
patient_mood_episode.save()
patient_mood_episode_id = patient_mood_episode.id
messages.success(request, "Submit kuesioner berhasil")
return redirect('/postcheck/?patient_id='+str(patient_id)+'&patient_mood_episode_id='+str(patient_mood_episode_id))
else:
form = PatientCheckForm(
initial={
'full_name':patient.full_name,
'gender':patient.gender,
'age':patient.age,
'date': datetime.now().date()
}
)
response_options=None
for question in questions:
response_option_temp=ResponseOption.objects.filter(question=question.id)
if (response_options==None):
response_options=response_option_temp #initiate
else:
response_options = response_options|response_option_temp
return render(request, 'pages/check.html', {'form':form, 'questions':questions, 'response_options':response_options})
@login_required
def postcheck(request):
patient_id=request.GET['patient_id']
patient_mood_episode_id=request.GET['patient_mood_episode_id']
patient_mood_episode = PatientMoodEpisode.objects.get(id=patient_mood_episode_id)
return render(request, 'pages/postcheck.html', {'patient_id':patient_id, 'patient_mood_episode':patient_mood_episode})
@login_required
def patientDetail(request):
patient_id= request.GET['patient_id']
patient=Patient.objects.get(id=patient_id)
form = PatientViewForm(
initial={
'full_name':patient.full_name,
'gender':patient.gender,
'age':patient.age,
}
)
patient_mood_episodes = PatientMoodEpisode.objects.filter(patient=patient)
return render(request, 'pages/patient_detail.html', {'patient_id':patient_id, 'form':form, 'patient_mood_episodes':patient_mood_episodes})
@login_required
def patientDelete(request, patient_id):
patient=Patient.objects.get(id=patient_id)
try:
if (patient!=None):
patient.delete()
messages.success(request, "Data berhasil dihapus")
except Exception as e:
messages.error(request, "Terjadi kesalahan. Data gagal dihapus. "+str(e))
return redirect('patient_list')
@login_required
def patientMoodEpisodeDetail(request):
patient_mood_episode_id= request.GET['patient_mood_episode_id']
patient_mood_episode=PatientMoodEpisode.objects.get(id=patient_mood_episode_id)
patient_mood_responses = PatientMoodResponse.objects.filter(patient_mood_episode=patient_mood_episode)
return render(request, 'pages/patient_mood_episode_detail.html', {'patient_mood_episode':patient_mood_episode, 'patient_mood_responses':patient_mood_responses})
@login_required
def patientMoodEpisodeDelete(request, id):
patient_mood_episode = PatientMoodEpisode.objects.get(id=id)
patient_id = patient_mood_episode.patient.id
if (patient_mood_episode!=None):
patient_mood_episode.delete()
messages.success(request, "Data berhasil dihapus")
else:
messages.error(request, "Terjadi kesalahan. Data gagal dihapus")
return redirect('/patient_detail/?patient_id='+str(patient_id))
| [
"[email protected]"
] | |
124e0cf8a41934ebf92b0eb5a3efd1570477083a | 9d36c0a474edb00750e08d0a82bf31a2e3d5af00 | /src/qibo/tests/test_core_circuit_features.py | 30251523f9f2268cc8d9dd88e041a88e5f74d766 | [
"Apache-2.0"
] | permissive | ZDQpeter/qibo | b2b48bbc46077a76ea886eff1ddbf1b96c83c37b | d8bd2d3de0d8eb12a428a9125302e318480e982a | refs/heads/master | 2023-08-11T03:55:32.241032 | 2021-10-14T10:06:25 | 2021-10-14T10:06:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,767 | py | """Test how features defined in :class:`qibo.abstractions.circuit.AbstractCircuit` work during circuit execution."""
import numpy as np
import pytest
from qibo import K, gates
from qibo.models import Circuit
@pytest.mark.parametrize("compile", [False, True])
def test_circuit_vs_gate_execution(backend, compile):
"""Check consistency between executing circuit and stand alone gates."""
from qibo import K
theta = 0.1234
target_c = Circuit(2)
target_c.add(gates.X(0))
target_c.add(gates.X(1))
target_c.add(gates.CU1(0, 1, theta))
target_result = target_c()
# custom circuit
def custom_circuit(initial_state, theta):
l1 = gates.X(0)(initial_state)
l2 = gates.X(1)(l1)
o = gates.CU1(0, 1, theta)(l2)
return o
initial_state = target_c.get_initial_state()
if compile:
c = K.compile(custom_circuit)
else:
c = custom_circuit
result = c(initial_state, theta)
K.assert_allclose(result, target_result)
def test_circuit_addition_execution(backend, accelerators):
c1 = Circuit(4, accelerators)
c1.add(gates.H(0))
c1.add(gates.H(1))
c1.add(gates.H(2))
c2 = Circuit(4, accelerators)
c2.add(gates.CNOT(0, 1))
c2.add(gates.CZ(2, 3))
c3 = c1 + c2
c = Circuit(4, accelerators)
c.add(gates.H(0))
c.add(gates.H(1))
c.add(gates.H(2))
c.add(gates.CNOT(0, 1))
c.add(gates.CZ(2, 3))
K.assert_allclose(c3(), c())
@pytest.mark.parametrize("deep", [False, True])
def test_copied_circuit_execution(backend, accelerators, deep):
"""Check that circuit copy execution is equivalent to original circuit."""
theta = 0.1234
c1 = Circuit(4, accelerators)
c1.add([gates.X(0), gates.X(1), gates.CU1(0, 1, theta)])
c1.add([gates.H(2), gates.H(3), gates.CU1(2, 3, theta)])
if not deep and accelerators is not None:
with pytest.raises(ValueError):
c2 = c1.copy(deep)
else:
c2 = c1.copy(deep)
K.assert_allclose(c2(), c1())
@pytest.mark.parametrize("fuse", [False, True])
def test_inverse_circuit_execution(backend, accelerators, fuse):
c = Circuit(4, accelerators)
c.add(gates.RX(0, theta=0.1))
c.add(gates.U2(1, phi=0.2, lam=0.3))
c.add(gates.U3(2, theta=0.1, phi=0.3, lam=0.2))
c.add(gates.CNOT(0, 1))
c.add(gates.CZ(1, 2))
c.add(gates.fSim(0, 2, theta=0.1, phi=0.3))
c.add(gates.CU2(0, 1, phi=0.1, lam=0.1))
if fuse:
if accelerators:
with pytest.raises(NotImplementedError):
c = c.fuse()
else:
c = c.fuse()
invc = c.invert()
target_state = np.ones(2 ** 4) / 4
final_state = invc(c(np.copy(target_state)))
K.assert_allclose(final_state, target_state)
def test_circuit_invert_and_addition_execution(backend, accelerators):
subroutine = Circuit(6)
subroutine.add([gates.RX(i, theta=0.1) for i in range(5)])
subroutine.add([gates.CZ(i, i + 1) for i in range(0, 5, 2)])
middle = Circuit(6)
middle.add([gates.CU2(i, i + 1, phi=0.1, lam=0.2) for i in range(0, 5, 2)])
circuit = subroutine + middle + subroutine.invert()
c = Circuit(6)
c.add([gates.RX(i, theta=0.1) for i in range(5)])
c.add([gates.CZ(i, i + 1) for i in range(0, 5, 2)])
c.add([gates.CU2(i, i + 1, phi=0.1, lam=0.2) for i in range(0, 5, 2)])
c.add([gates.CZ(i, i + 1) for i in range(0, 5, 2)])
c.add([gates.RX(i, theta=-0.1) for i in range(5)])
assert c.depth == circuit.depth
K.assert_allclose(circuit(), c())
@pytest.mark.parametrize("distribute_small", [False, True])
def test_circuit_on_qubits_execution(backend, accelerators, distribute_small):
if distribute_small:
smallc = Circuit(3, accelerators=accelerators)
else:
smallc = Circuit(3)
smallc.add((gates.RX(i, theta=i + 0.1) for i in range(3)))
smallc.add((gates.CNOT(0, 1), gates.CZ(1, 2)))
largec = Circuit(6, accelerators=accelerators)
largec.add((gates.RY(i, theta=i + 0.2) for i in range(0, 6, 2)))
largec.add(smallc.on_qubits(1, 3, 5))
targetc = Circuit(6)
targetc.add((gates.RY(i, theta=i + 0.2) for i in range(0, 6, 2)))
targetc.add((gates.RX(i, theta=i // 2 + 0.1) for i in range(1, 6, 2)))
targetc.add((gates.CNOT(1, 3), gates.CZ(3, 5)))
assert largec.depth == targetc.depth
K.assert_allclose(largec(), targetc())
@pytest.mark.parametrize("distribute_small", [False, True])
def test_circuit_on_qubits_double_execution(backend, accelerators, distribute_small):
if distribute_small:
smallc = Circuit(3, accelerators=accelerators)
else:
smallc = Circuit(3)
smallc.add((gates.RX(i, theta=i + 0.1) for i in range(3)))
smallc.add((gates.CNOT(0, 1), gates.CZ(1, 2)))
# execute the small circuit before adding it to the large one
_ = smallc()
largec = Circuit(6, accelerators=accelerators)
largec.add((gates.RY(i, theta=i + 0.2) for i in range(0, 6, 2)))
if distribute_small and accelerators is not None:
with pytest.raises(RuntimeError):
largec.add(smallc.on_qubits(1, 3, 5))
else:
largec.add(smallc.on_qubits(1, 3, 5))
targetc = Circuit(6)
targetc.add((gates.RY(i, theta=i + 0.2) for i in range(0, 6, 2)))
targetc.add((gates.RX(i, theta=i // 2 + 0.1) for i in range(1, 6, 2)))
targetc.add((gates.CNOT(1, 3), gates.CZ(3, 5)))
assert largec.depth == targetc.depth
K.assert_allclose(largec(), targetc())
def test_circuit_on_qubits_controlled_by_execution(backend, accelerators):
smallc = Circuit(3)
smallc.add(gates.RX(0, theta=0.1).controlled_by(1, 2))
smallc.add(gates.RY(1, theta=0.2).controlled_by(0))
smallc.add(gates.RX(2, theta=0.3).controlled_by(1, 0))
smallc.add(gates.RZ(1, theta=0.4).controlled_by(0, 2))
largec = Circuit(6, accelerators=accelerators)
largec.add(gates.H(i) for i in range(6))
largec.add(smallc.on_qubits(1, 4, 3))
targetc = Circuit(6)
targetc.add(gates.H(i) for i in range(6))
targetc.add(gates.RX(1, theta=0.1).controlled_by(3, 4))
targetc.add(gates.RY(4, theta=0.2).controlled_by(1))
targetc.add(gates.RX(3, theta=0.3).controlled_by(1, 4))
targetc.add(gates.RZ(4, theta=0.4).controlled_by(1, 3))
assert largec.depth == targetc.depth
K.assert_allclose(largec(), targetc())
@pytest.mark.parametrize("controlled", [False, True])
def test_circuit_on_qubits_with_unitary_execution(backend, accelerators, controlled):
unitaries = np.random.random((2, 2, 2))
smallc = Circuit(2)
if controlled:
smallc.add(gates.Unitary(unitaries[0], 0).controlled_by(1))
smallc.add(gates.Unitary(unitaries[1], 1).controlled_by(0))
else:
smallc.add(gates.Unitary(unitaries[0], 0))
smallc.add(gates.Unitary(unitaries[1], 1))
smallc.add(gates.CNOT(0, 1))
largec = Circuit(4, accelerators=accelerators)
largec.add(gates.RY(0, theta=0.1))
largec.add(gates.RY(1, theta=0.2))
largec.add(gates.RY(2, theta=0.3))
largec.add(gates.RY(3, theta=0.2))
largec.add(smallc.on_qubits(3, 0))
targetc = Circuit(4)
targetc.add(gates.RY(0, theta=0.1))
targetc.add(gates.RY(1, theta=0.2))
targetc.add(gates.RY(2, theta=0.3))
targetc.add(gates.RY(3, theta=0.2))
if controlled:
targetc.add(gates.Unitary(unitaries[0], 3).controlled_by(0))
targetc.add(gates.Unitary(unitaries[1], 0).controlled_by(3))
else:
targetc.add(gates.Unitary(unitaries[0], 3))
targetc.add(gates.Unitary(unitaries[1], 0))
targetc.add(gates.CNOT(3, 0))
assert largec.depth == targetc.depth
K.assert_allclose(largec(), targetc())
def test_circuit_on_qubits_with_varlayer_execution(backend, accelerators):
thetas = np.random.random([2, 4])
smallc = Circuit(4)
smallc.add(gates.VariationalLayer(range(4), [(0, 1), (2, 3)],
gates.RX, gates.CNOT,
thetas[0]))
largec = Circuit(8, accelerators=accelerators)
largec.add(smallc.on_qubits(*range(0, 8, 2)))
largec.add(gates.VariationalLayer(range(1, 8, 2), [(1, 3), (5, 7)],
gates.RY, gates.CZ,
thetas[1]))
targetc = Circuit(8)
targetc.add(gates.VariationalLayer(range(0, 8, 2), [(0, 2), (4, 6)],
gates.RX, gates.CNOT,
thetas[0]))
targetc.add(gates.VariationalLayer(range(1, 8, 2), [(1, 3), (5, 7)],
gates.RY, gates.CZ,
thetas[1]))
assert largec.depth == targetc.depth
K.assert_allclose(largec(), targetc())
def test_circuit_decompose_execution(backend):
c = Circuit(6)
c.add(gates.RX(0, 0.1234))
c.add(gates.RY(1, 0.4321))
c.add((gates.H(i) for i in range(2, 6)))
c.add(gates.CNOT(0, 1))
c.add(gates.X(3).controlled_by(0, 1, 2, 4))
decomp_c = c.decompose(5)
K.assert_allclose(c(), decomp_c(), atol=1e-6)
def test_repeated_execute_pauli_noise_channel(backend):
thetas = np.random.random(4)
c = Circuit(4)
c.add((gates.RY(i, t) for i, t in enumerate(thetas)))
c.add((gates.PauliNoiseChannel(i, px=0.1, py=0.2, pz=0.3, seed=1234)
for i in range(4)))
final_state = c(nshots=20)
np.random.seed(1234)
target_state = []
for _ in range(20):
noiseless_c = Circuit(4)
noiseless_c.add((gates.RY(i, t) for i, t in enumerate(thetas)))
for i in range(4):
if np.random.random() < 0.1:
noiseless_c.add(gates.X(i))
if np.random.random() < 0.2:
noiseless_c.add(gates.Y(i))
if np.random.random() < 0.3:
noiseless_c.add(gates.Z(i))
target_state.append(noiseless_c())
target_state = np.stack(target_state)
K.assert_allclose(final_state, target_state)
def test_repeated_execute_with_noise(backend):
thetas = np.random.random(4)
c = Circuit(4)
c.add((gates.RY(i, t) for i, t in enumerate(thetas)))
noisy_c = c.with_noise((0.2, 0.0, 0.1))
np.random.seed(1234)
final_state = noisy_c(nshots=20)
np.random.seed(1234)
target_state = []
for _ in range(20):
noiseless_c = Circuit(4)
for i, t in enumerate(thetas):
noiseless_c.add(gates.RY(i, theta=t))
if np.random.random() < 0.2:
noiseless_c.add(gates.X(i))
if np.random.random() < 0.1:
noiseless_c.add(gates.Z(i))
target_state.append(noiseless_c())
target_state = np.stack(target_state)
K.assert_allclose(final_state, target_state)
| [
"[email protected]"
] | |
6d355c5c5093df76111c6227f1d90df7885f252b | 2744fbd0c33c181f6bb71abbb26982f57a07ae9a | /config.py | ff0067d0d32bc4e56fcd10d7032f29d1dfc7de97 | [] | no_license | mehulchopradev/yannick-webapp | f2d777cfc23786b142551deefb2437cd24fb7592 | 4eb18a574177fb3f2d595255b4d3d421d5518944 | refs/heads/master | 2020-04-01T21:01:15.273151 | 2018-10-18T14:01:47 | 2018-10-18T14:01:47 | 153,633,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | class Config:
SQLALCHEMY_DATABASE_URI='mysql+mysqlconnector://root:root@localhost/yannickwebapp_db'
SQLALCHEMY_TRACK_MODIFICATIONS = False
| [
"[email protected]"
] | |
317568248fa7b20af7cf4cfac25cb622ab9e5eb1 | 46244bb6af145cb393846505f37bf576a8396aa0 | /algorithms/ch4/4.1.13.py | 1d5cc8e306e2a491210d8e21daa6a91adca28254 | [] | no_license | aoeuidht/homework | c4fabfb5f45dbef0874e9732c7d026a7f00e13dc | 49fb2a2f8a78227589da3e5ec82ea7844b36e0e7 | refs/heads/master | 2022-10-28T06:42:04.343618 | 2022-10-15T15:52:06 | 2022-10-15T15:52:06 | 18,726,877 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import graph
class BreadthFirstPaths:
def __init__(self, g, s):
self.g = g
self.s = s
self.marked = set()
    def dist_to(self, v):
        return self.dist_to_wrapper(self.s, v, 0)

    def dist_to_wrapper(self, s, v, depth):
        if s == v:
            return 0
        # queue of (vertex, distance-from-s) pairs still to visit
        vertex_queue = [(s, 0)]
while vertex_queue:
s, depth = vertex_queue.pop(0)
self.marked.add(s)
for _s in self.g.adj(s):
if _s in self.marked:
continue
# add them to the queue
self.marked.add(_s)
# check if target
if _s == v:
return depth + 1
                    vertex_queue.append((_s, depth + 1))
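
if __name__ == '__main__':
    # Minimal smoke test. The `graph` module imported above is not shown in
    # this file, so this hypothetical stub mimics only the adj() method that
    # BreadthFirstPaths actually uses.
    class _StubGraph(object):
        def __init__(self, edges):
            self._adj = {}
            for a, b in edges:
                self._adj.setdefault(a, []).append(b)
                self._adj.setdefault(b, []).append(a)

        def adj(self, v):
            return self._adj.get(v, [])

    g = _StubGraph([(0, 1), (1, 2), (2, 3)])
    print(BreadthFirstPaths(g, 0).dist_to(3))  # expected: 3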
| [
"[email protected]"
] | |
ebc5a7e1b4004327a673f2af979efc3495396138 | 9fcd6a91132fd12731d259fe7d709cdf222381bb | /2020/19/foo.py | 8c0e0c373ff1f394ff39b4c49fbfbf9a2b6b2b61 | [] | no_license | protocol7/advent-of-code | f5bdb541d21414ba833760958a1b9d05fc26f84a | fa110cef83510d86e82cb5d02f6af5bb7016f2c7 | refs/heads/master | 2023-04-05T15:33:26.146031 | 2023-03-18T14:22:43 | 2023-03-18T14:22:43 | 159,989,507 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | import sys
from collections import *
import re
rs, msgs = sys.stdin.read().split("\n\n")
rules = defaultdict(list)
for r in rs.split("\n"):
a, b = r.split(": ")
if '"' in b:
rules[a] = b[1:-1]
else:
for o in b.split(" | "):
rules[a].append(o.split())
def build(rule):
reg = ""
r = rules[rule]
if type(r) == str:
return r
else:
for oi, ors in enumerate(r):
if oi > 0:
reg += "|"
for rx in ors:
reg += build(rx)
return "(%s)" % reg
r = "^%s$" % build("0")
print(sum(bool(re.match(r, m)) for m in msgs.split()))
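# Toy illustration (not the puzzle input): with rules {"0": [["1", "1"]],
# "1": "a"}, build("0") returns "(aa)", so the anchored pattern is ^(aa)$
# and only the message "aa" matches.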
| [
"[email protected]"
] | |
ed6a9ce70978a4f43af6c5e0df07abcb7cda0242 | c61a28aba19f7cdf9a5127e8a782bf115c265e70 | /apps/recruitpro/recruitpro/recruitpro/doctype/project_user/project_user.py | 2485ab02a13cefca8d30d07a52de8ce60c0592d9 | [
"MIT"
] | permissive | sharmilaviji/RecruitPRO-NEW | fa72c8fc00f469a41798b1047c11dcc470fbc495 | dcfaedebe56b45acd6ddcab7e24c939b853a2c8c | refs/heads/master | 2021-05-26T12:14:12.611154 | 2020-04-27T04:40:50 | 2020-04-27T04:40:50 | 254,125,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, teampro and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class ProjectUser(Document):
pass
| [
"[email protected]"
] | |
7977a21d9cd1ff00ad98f7bdf786cad24b051af1 | 423ba09a145b3468a322acf4ddf7d2c2446e227d | /atcoder/abc119/abc119_c.py | 4d8b05eaf0c2223837bb66ee8d6761db24fbe938 | [] | no_license | hirosuzuki/procon | 13d3bc332d6e4368fd54fec742b32b09729658ed | 533f40e13456542b202905a61814ad926c3c206e | refs/heads/master | 2021-05-12T10:42:51.920561 | 2020-05-20T15:49:05 | 2020-05-20T15:49:05 | 117,356,746 | 0 | 0 | null | 2018-01-29T14:46:28 | 2018-01-13T15:56:40 | Python | UTF-8 | Python | false | false | 832 | py | N, A, B, C = [int(_) for _ in input().split()]
L = [int(input()) for i in range(N)]
from itertools import combinations
def combi(xs, left):
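    # yield every non-empty combination of xs that still leaves at least
    # `left` elements for the groups that have yet to be formed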
for i in range(len(xs) - left):
for r in combinations(xs, i + 1):
yield r
def diff(a, b):
result = a[:]
for x in b:
if x in result:
result.remove(x)
return result
def calc(xs):
result = 10**100
for ra in combi(xs, 2):
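        # MP cost: 1 per cm of length adjustment plus 10 per composition
        # (joining n bamboos needs n - 1 joins, hence len(ra) * 10 - 10)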
sa = abs(A - sum(ra)) + len(ra) * 10 - 10
xsa = diff(xs, ra)
for rb in combi(xsa, 1):
sb = abs(B - sum(rb)) + len(rb) * 10 - 10
xsb = diff(xsa, rb)
for rc in combi(xsb, 0):
sc = abs(C - sum(rc)) + len(rc) * 10 - 10
total = sa + sb + sc
result = min(result, total)
return result
print(calc(L))
| [
"[email protected]"
] | |
ecb100331d176a8a055fc506bdae23a213d460f5 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_10099.py | 4e2bc685592970fe8cc2e352b2e7c291cafac756 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | # How can I get Bottle to restart on file change?
from bottle import run
run(reloader=True)
| [
"[email protected]"
] | |
cb551e3a871a6f00f0bc6847baa76f890601f945 | 6ad55b80c944e123fef29468aa018e15505cd4a3 | /references/lr/parser/adder.py | 41435d2272f198ac21eec7fa984834313768a71b | [] | no_license | ramalho/kaminpy | c7799d58edd81ada1ba7914528d16872ecb771f2 | 2f5038a9ebacb378fc45fd7fd840a50ac47c940e | refs/heads/master | 2021-01-13T02:22:16.765278 | 2018-11-19T12:34:45 | 2018-11-19T12:34:45 | 2,436,569 | 17 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | import token as py_token
import util
class Token:
def __init__(self, src, start, end, line):
self.src = src
self.start = start
self.end = end
self.line = line
self.evaluate()
def evaluate(self):
pass
def nud(self):
raise SyntaxError('unexpected %r in line:\n%s' % (self.src, self.line))
class Literal(Token):
def evaluate(self):
self.value = int(self.src)
def nud(self):
return self.value
class OperatorAdd(Token):
lbp = 10
def led(self, left):
right = expression(10)
return left + right
class End(Token):
lbp = 0
TOKENS = {
py_token.NUMBER: Literal,
py_token.PLUS: OperatorAdd,
py_token.ENDMARKER: End,
}
def tokenize(src):
for token_info in util.tokenize(src):
token_class = TOKENS[token_info.type]
yield token_class(*token_info[1:])
token = Ellipsis
def expression(rbp=0):
global token
t = token
token = next()
left = t.nud()
while rbp < token.lbp:
t = token
token = next()
left = t.led(left)
return left
def evaluate(src):
"""
>>> evaluate("1 + 2")
3
"""
global token, next
next = tokenize(src).__next__
token = next()
if isinstance(token, End):
return None
try:
return expression()
except StopIteration:
raise SyntaxError('unexpected end of source')
| [
"[email protected]"
] | |
044e6cd2bcf67dd9722c851da59f18e8d33316c7 | c6dfa13fad324443e1e93a6a3e165938f6114bfe | /mysite_register/views.py | 8f05f5163c8230d657018d89d436baab542e335a | [] | no_license | keystonecyberstop/testTimePass | d94687647b2ad5b933a1ac99db856778e96a4acd | ff505f12889157c69694b648fe608de9f9511e43 | refs/heads/master | 2022-12-24T03:24:52.013959 | 2020-10-11T11:32:38 | 2020-10-11T11:32:38 | 303,101,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | from django.shortcuts import render
from django.views.generic import ListView
from .models import Entry
class HomeView(ListView):
model = Entry
template_name = 'entries/index.html'
# def get_context_data(self, **kwargs):
# print(kwargs)
# context = super().get_context_data(**kwargs)
# print('>>>>>>>>>>>')
# print(context)
# print('>>>>>>>>>>>')
# return context
# def home(request):
# """
# docstring
# """
# context = {
# 'entry_list' : Entry.objects.all()
# }
# return render(request, 'entries/index.html', context=context) | [
"--global"
] | --global |
492309989d063058f709494cce513673f29adee7 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2190/48117/253305.py | 9bedb5ef7bc7e20973cbbca26cc70d059c294ec9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | questNum = int(input())
for i in range(questNum):
quest = input().split(' ')
s = quest[0]
k = int(quest[1])
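    # brute force: enumerate all O(n^2) substrings of s, count how often each
    # occurs, then derive the answer from how many substrings of each length
    # occur exactly k times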
sa = []
index = 0
for count in range(1, len(s) + 1):
for i in range(len(s)):
if i + count <= len(s):
sa.append(s[i:i + count])
    # occurrence count of each substring, index-aligned with sa
    # (the loop below reads countList[index] for sa[index])
    countList = [sa.count(string) for string in sa]
lengthList = [0]*len(s)
for index in range(len(sa)):
if countList[index] == k:
lengthList[len(sa[index]) - 1] += 1
ans = -1
for l in lengthList:
if l != 0 :
if l > ans:
ans = l
print(ans) | [
"[email protected]"
] | |
2ed3942ae4abf416e93c8b60d8e80353a2aeb6ce | 62b066da43081895098e43b31e843a1af3700546 | /CNN/FractalNet/test.py | 136d42b6cc9bbe3229ba7c4cb1573cdeec72665b | [
"MIT"
] | permissive | bssrdf/Paper2Code | 6945251cf508e99c2d7d40b2b8639e993ab30bff | 2fb18b3d4ed66b68cab86139c4d9a0dcbf1eba39 | refs/heads/master | 2022-03-30T16:52:21.834511 | 2020-04-18T13:25:59 | 2020-04-18T13:25:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | import os
import glob
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras.datasets import cifar10
from keras.layers import Activation, Input, Dense, Flatten
from keras.models import Model
from keras.optimizers import SGD, RMSprop, Adam, Nadam
from keras.utils import plot_model
from keras.utils.np_utils import to_categorical
from keras import backend as K
from fractalnet import FractalNet
# paper implementation details
NB_CLASSES = 10
NB_EPOCHS = 400
LEARN_START = 0.02
BATCH_SIZE = 100
MOMENTUM = 0.9
Dropout = [0., 0.1, 0.2, 0.3, 0.4]
CONV = [(3, 3, 64), (3, 3, 128), (3, 3, 256), (3, 3, 512), (2, 2, 512)]
# cifar10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
Y_train = to_categorical(y_train, NB_CLASSES)
Y_test = to_categorical(y_test, NB_CLASSES)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.
X_test /= 255.
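
# LR schedule from the FractalNet paper: start at 0.02 and divide by 10
# whenever the number of remaining epochs halves (epochs 200/300/350/375).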
def learning_rate(epoch):
if epoch < 200:
return 0.02
if epoch < 300:
return 0.002
if epoch < 350:
return 0.0002
if epoch < 375:
return 0.00002
return 0.000002
# build network
im_in = Input(shape=(32, 32, 3))
output = FractalNet(B=5, C=3, conv=CONV, drop_path=0.15, dropout=Dropout, deepest=False)(im_in)
output = Flatten()(output)
output = Dense(NB_CLASSES, init='glorot_normal')(output)
output = Activation('softmax')(output)
model = Model(im_in, output)
optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM, nesterov=True)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
plot_model(model, to_file='model.png', show_shapes=True)
# train
learn = LearningRateScheduler(learning_rate)
model.fit(x=X_train, y=Y_train, batch_size=BATCH_SIZE, epochs=NB_EPOCHS,
validation_data=(X_test, Y_test), callbacks=[learn])
| [
"[email protected]"
] | |
c72e1e6f41a202b2b1cd09a03a1bee06830d0410 | 1f1e8b335470065b67cce28338cfb4a6da503c95 | /0x08-python-more_classes/1-rectangle.py | 3c53e4b25f9cde658c140933cf6de96b3862d9a7 | [] | no_license | guxal/holbertonschool-higher_level_programming | 48353071e719a509e10f3067f0c3f88cb44bd27d | fffd287c510602dc45e36df486f60cdfa1205335 | refs/heads/master | 2020-07-22T23:40:03.266880 | 2020-02-14T02:42:11 | 2020-02-14T02:42:11 | 207,370,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | #!/usr/bin/python3
class Rectangle:
"""
The Rectangle class create a new object
Args:
width (int): integer value of the width
height (int): integer value of the height
Attributes:
width (int): integer value of the width
height (int): integer value of the height
"""
def __init__(self, width=0, height=0):
self.width = width
self.height = height
@property
def width(self):
"""Get width"""
return self.__width
@width.setter
def width(self, value):
"""Set width"""
if isinstance(value, int) is False:
raise TypeError("width must be an integer")
elif value < 0:
raise ValueError("width must be >= 0")
else:
self.__width = value
@property
def height(self):
"""Get height"""
return self.__height
@height.setter
def height(self, value):
"""Set heigth"""
if isinstance(value, int) is False:
raise TypeError("height must be an integer")
elif value < 0:
raise ValueError("height must be >= 0")
else:
self.__height = value
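
# Hypothetical quick check (Holberton exercises normally ship a separate
# main file; this is only illustrative):
#
#     r = Rectangle(3, 4)
#     print(r.width, r.height)   # 3 4
#     r.width = -1               # raises ValueError: width must be >= 0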
| [
"[email protected]"
] | |
a79112df59f5d14d3ae00fe0a7bcfb6ee357574e | 87af51366dd4597fb6ecf7887e44a53dacf67364 | /juaphe/wsgi.py | 19cbcbef0a0746d16594e05a93fdf826304ac291 | [] | no_license | MoTechStore/CIS | 8707e17c1a1702dfdf17dbbba08e6ccf232acb45 | b169cf374efdf1db315fde345f865f0831e58694 | refs/heads/master | 2023-06-24T07:43:14.144910 | 2021-04-22T15:31:50 | 2021-04-22T15:31:50 | 360,562,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for juaphe project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'juaphe.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
b44f9382471ab49f9894512005ab77ff77173ff7 | 81d955c3ac886e690ceb01026ed769b1784dbef9 | /purity_fb/purity_fb_1dot8/models/subnet.py | d5f081f13b76b165b6e17f82183ae01b07055c45 | [
"Apache-2.0"
] | permissive | etsangsplk/purity_fb_python_client | bc525ef1a18f6a79c1536cb4519a7efd58d09d89 | 0807a0fa2eab273bc08f73266d9cda9fa33b37bd | refs/heads/master | 2020-06-03T05:49:03.015147 | 2019-05-16T06:11:40 | 2019-05-16T06:11:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,340 | py | # coding: utf-8
"""
Purity//FB REST Client
Client for Purity//FB REST API (1.0 - 1.8), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.8
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Subnet(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'enabled': 'bool',
'gateway': 'str',
'interfaces': 'list[Reference]',
'link_aggregation_group': 'Reference',
'mtu': 'int',
'prefix': 'str',
'services': 'list[str]',
'vlan': 'int'
}
attribute_map = {
'id': 'id',
'name': 'name',
'enabled': 'enabled',
'gateway': 'gateway',
'interfaces': 'interfaces',
'link_aggregation_group': 'link_aggregation_group',
'mtu': 'mtu',
'prefix': 'prefix',
'services': 'services',
'vlan': 'vlan'
}
def __init__(self, id=None, name=None, enabled=None, gateway=None, interfaces=None, link_aggregation_group=None, mtu=None, prefix=None, services=None, vlan=None):
"""
Subnet - a model defined in Swagger
"""
self._id = None
self._name = None
self._enabled = None
self._gateway = None
self._interfaces = None
self._link_aggregation_group = None
self._mtu = None
self._prefix = None
self._services = None
self._vlan = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if enabled is not None:
self.enabled = enabled
if gateway is not None:
self.gateway = gateway
if interfaces is not None:
self.interfaces = interfaces
if link_aggregation_group is not None:
self.link_aggregation_group = link_aggregation_group
if mtu is not None:
self.mtu = mtu
if prefix is not None:
self.prefix = prefix
if services is not None:
self.services = services
if vlan is not None:
self.vlan = vlan
@property
def id(self):
"""
Gets the id of this Subnet.
A non-modifiable, globally unique ID chosen by the system.
:return: The id of this Subnet.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Subnet.
A non-modifiable, globally unique ID chosen by the system.
:param id: The id of this Subnet.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this Subnet.
The name of the object (e.g., a file system or snapshot).
:return: The name of this Subnet.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Subnet.
The name of the object (e.g., a file system or snapshot).
:param name: The name of this Subnet.
:type: str
"""
self._name = name
@property
def enabled(self):
"""
Gets the enabled of this Subnet.
Indicates if subnet is enabled (true) or disabled (false). Enabled by default.
:return: The enabled of this Subnet.
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""
Sets the enabled of this Subnet.
Indicates if subnet is enabled (true) or disabled (false). Enabled by default.
:param enabled: The enabled of this Subnet.
:type: bool
"""
self._enabled = enabled
@property
def gateway(self):
"""
Gets the gateway of this Subnet.
The IPv4 or IPv6 address of the gateway through which the specified subnet is to communicate with the network.
:return: The gateway of this Subnet.
:rtype: str
"""
return self._gateway
@gateway.setter
def gateway(self, gateway):
"""
Sets the gateway of this Subnet.
The IPv4 or IPv6 address of the gateway through which the specified subnet is to communicate with the network.
:param gateway: The gateway of this Subnet.
:type: str
"""
self._gateway = gateway
@property
def interfaces(self):
"""
Gets the interfaces of this Subnet.
List of network interfaces associated with this subnet.
:return: The interfaces of this Subnet.
:rtype: list[Reference]
"""
return self._interfaces
@interfaces.setter
def interfaces(self, interfaces):
"""
Sets the interfaces of this Subnet.
List of network interfaces associated with this subnet.
:param interfaces: The interfaces of this Subnet.
:type: list[Reference]
"""
self._interfaces = interfaces
@property
def link_aggregation_group(self):
"""
Gets the link_aggregation_group of this Subnet.
reference of the associated LAG.
:return: The link_aggregation_group of this Subnet.
:rtype: Reference
"""
return self._link_aggregation_group
@link_aggregation_group.setter
def link_aggregation_group(self, link_aggregation_group):
"""
Sets the link_aggregation_group of this Subnet.
reference of the associated LAG.
:param link_aggregation_group: The link_aggregation_group of this Subnet.
:type: Reference
"""
self._link_aggregation_group = link_aggregation_group
@property
def mtu(self):
"""
Gets the mtu of this Subnet.
Maximum message transfer unit (packet) size for the subnet in bytes. MTU setting cannot exceed the MTU of the corresponding physical interface. 1500 by default.
:return: The mtu of this Subnet.
:rtype: int
"""
return self._mtu
@mtu.setter
def mtu(self, mtu):
"""
Sets the mtu of this Subnet.
Maximum message transfer unit (packet) size for the subnet in bytes. MTU setting cannot exceed the MTU of the corresponding physical interface. 1500 by default.
:param mtu: The mtu of this Subnet.
:type: int
"""
if mtu is not None and mtu > 9216:
raise ValueError("Invalid value for `mtu`, must be a value less than or equal to `9216`")
if mtu is not None and mtu < 1280:
raise ValueError("Invalid value for `mtu`, must be a value greater than or equal to `1280`")
self._mtu = mtu
@property
def prefix(self):
"""
Gets the prefix of this Subnet.
The IPv4 or IPv6 address to be associated with the specified subnet.
:return: The prefix of this Subnet.
:rtype: str
"""
return self._prefix
@prefix.setter
def prefix(self, prefix):
"""
Sets the prefix of this Subnet.
The IPv4 or IPv6 address to be associated with the specified subnet.
:param prefix: The prefix of this Subnet.
:type: str
"""
self._prefix = prefix
@property
def services(self):
"""
Gets the services of this Subnet.
The services provided by this subnet, as inherited from all of its interfaces
:return: The services of this Subnet.
:rtype: list[str]
"""
return self._services
@services.setter
def services(self, services):
"""
Sets the services of this Subnet.
The services provided by this subnet, as inherited from all of its interfaces
:param services: The services of this Subnet.
:type: list[str]
"""
self._services = services
@property
def vlan(self):
"""
Gets the vlan of this Subnet.
VLAN ID
:return: The vlan of this Subnet.
:rtype: int
"""
return self._vlan
@vlan.setter
def vlan(self, vlan):
"""
Sets the vlan of this Subnet.
VLAN ID
:param vlan: The vlan of this Subnet.
:type: int
"""
self._vlan = vlan
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Subnet):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
15e6250031174b4e039ad1e59a431cd4543e313f | 25e481ef7fba79285f4c8a7fa2e81c8b2b7f9cce | /saleor/product/urls.py | 792a3322de14affc3b6b5778fce32134a08c3d1e | [
"BSD-2-Clause"
] | permissive | arslanahmd/Ghar-Tameer | 59e60def48a14f9452dfefe2edf30e362878191d | 72401b2fc0079e6d52e844afd8fcf57122ad319f | refs/heads/master | 2023-01-31T04:08:26.288332 | 2018-06-07T18:02:01 | 2018-06-07T18:02:01 | 136,231,127 | 0 | 0 | NOASSERTION | 2023-01-11T22:21:42 | 2018-06-05T20:28:11 | Python | UTF-8 | Python | false | false | 423 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/$',
views.product_details, name='details'),
url(r'^category/(?P<path>[a-z0-9-_/]+?)-(?P<category_id>[0-9]+)/$',
views.category_index, name='category'),
    url(r'^(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/add/$',
        views.product_add_to_cart, name='add-to-cart'),
]
| [
"[email protected]"
] | |
65784261a12349ca676e73e89cd86d05c20a0261 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/N/NicolaHughes/oecd_international_ngo_identifiers.py | 9f0a72e55aef8b2b9a0796f890d51a673f3013f3 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | import scraperwiki
from bs4 import BeautifulSoup
url = "http://old.iatistandard.org/codelists/organisation_identifier_ingo"
html = scraperwiki.scrape(url)
soup = BeautifulSoup(html)
table = soup.find("tbody")
for td in table.find_all("tr"):
code = td.find("td", "column-1").get_text()
country = td.find("td", "column-2").get_text()
abbrev = td.find("td", "column-3").get_text()
name = td.find("td", "column-4").get_text()
organisation = "international NGO"
data = {"Code": code, "Country": country, "Abbreviation": abbrev, "Name": name, "Organisation_type": organisation}
scraperwiki.sqlite.save(["Name"], data)import scraperwiki
from bs4 import BeautifulSoup
url = "http://old.iatistandard.org/codelists/organisation_identifier_ingo"
html = scraperwiki.scrape(url)
soup = BeautifulSoup(html)
table = soup.find("tbody")
for td in table.find_all("tr"):
code = td.find("td", "column-1").get_text()
country = td.find("td", "column-2").get_text()
abbrev = td.find("td", "column-3").get_text()
name = td.find("td", "column-4").get_text()
organisation = "international NGO"
data = {"Code": code, "Country": country, "Abbreviation": abbrev, "Name": name, "Organisation_type": organisation}
scraperwiki.sqlite.save(["Name"], data) | [
"[email protected]"
] | |
2d2260b9fe36da45dcd750354ca6cccbdf07ab61 | d49f90aac10fe247d3dab988860c47fc4cb6a38e | /test_coverage_site/wsgi.py | 874b39ec3bf368563be03bcda8346571b0c4c8d2 | [] | no_license | slobdell/test-coverage-site | ae12220c8302f683f5513372920d471897b3c07f | 95c9caff146061f602a1283e3bf87486daf9a8c6 | refs/heads/master | 2021-01-16T19:20:47.312548 | 2015-05-04T23:41:40 | 2015-05-04T23:41:40 | 34,973,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | """
WSGI config for test_coverage_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_coverage_site.settings")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
| [
"[email protected]"
] | |
376eab9712e3e1c83a72a34acad7e20b5e24faf4 | c17ecfd1294c1d0834c96cf35ae76feb2bba75a6 | /detectron2/utils/visualizer.py | 7b9e774780e02b03f79cf953048b790a94e2b1c4 | [
"Apache-2.0"
] | permissive | Arithmeticjia/detectron2 | 6ccb116d9b84a6d98dc75cfb53593d3cec2c2057 | 06b14b4eae81c23acdb18e3e18c1a1fbc5dbb26c | refs/heads/master | 2020-08-16T19:17:28.933746 | 2019-10-16T12:20:58 | 2019-10-16T12:20:58 | 215,541,226 | 0 | 0 | Apache-2.0 | 2019-10-16T12:20:24 | 2019-10-16T12:20:23 | null | UTF-8 | Python | false | false | 38,794 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import colorsys
import numpy as np
from enum import Enum, unique
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import pycocotools.mask as mask_util
import torch
from matplotlib.backends.backend_agg import FigureCanvasAgg
from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks
from .colormap import random_color
__all__ = ["ColorMode", "VisImage", "Visualizer"]
_SMALL_OBJECT_AREA_THRESH = 1000
_LARGE_MASK_AREA_THRESH = 120000
_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
_BLACK = (0, 0, 0)
_RED = (1.0, 0, 0)
_KEYPOINT_THRESHOLD = 0.05
@unique
class ColorMode(Enum):
"""
Enum of different color modes to use for instance visualizations.
Attributes:
IMAGE: Picks a random color for every instance and overlay segmentations with low opacity.
SEGMENTATION: Let instances of the same category have similar colors, and overlay them with
high opacity. This provides more attention on the quality of segmentation.
IMAGE_BW: same as IMAGE, but convert all areas without masks to gray-scale.
"""
IMAGE = 0
SEGMENTATION = 1
IMAGE_BW = 2
class GenericMask:
"""
Attribute:
polygons (list[ndarray]): list[ndarray]: polygons for this mask.
Each ndarray has format [x, y, x, y, ...]
mask (ndarray): a binary mask
"""
def __init__(self, mask_or_polygons, height, width):
self._mask = self._polygons = self._has_holes = None
self.height = height
self.width = width
m = mask_or_polygons
if isinstance(m, dict):
# RLEs
assert "counts" in m and "size" in m
if isinstance(m["counts"], list): # uncompressed RLEs
h, w = m["size"]
assert h == height and w == width
m = mask_util.frPyObjects(m, h, w)
self._mask = mask_util.decode(m)[:, :]
return
if isinstance(m, list): # list[ndarray]
self._polygons = [np.asarray(x).reshape(-1) for x in m]
return
if isinstance(m, np.ndarray): # assumed to be a binary mask
assert m.shape[1] != 2, m.shape
assert m.shape == (height, width), m.shape
self._mask = m.astype("uint8")
return
raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))
@property
def mask(self):
if self._mask is None:
self._mask = self.polygons_to_mask(self._polygons)
return self._mask
@property
def polygons(self):
if self._polygons is None:
self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
return self._polygons
@property
def has_holes(self):
if self._has_holes is None:
if self._mask is not None:
self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
else:
self._has_holes = False # if original format is polygon, does not have holes
return self._has_holes
def mask_to_polygons(self, mask):
# cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
# hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
# Internal contours (holes) are placed in hierarchy-2.
# cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
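        # indexing from the end keeps this working on both OpenCV 3, which
        # returns (image, contours, hierarchy), and OpenCV 4, which returns
        # (contours, hierarchy)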
hierarchy = res[-1]
if hierarchy is None: # empty mask
return [], False
has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
res = res[-2]
res = [x.flatten() for x in res]
res = [x for x in res if len(x) >= 6]
return res, has_holes
def polygons_to_mask(self, polygons):
rle = mask_util.frPyObjects(polygons, self.height, self.width)
rle = mask_util.merge(rle)
return mask_util.decode(rle)[:, :]
def area(self):
return self.mask.sum()
def bbox(self):
p = mask_util.frPyObjects(self.polygons, self.height, self.width)
p = mask_util.merge(p)
bbox = mask_util.toBbox(p)
bbox[2] += bbox[0]
bbox[3] += bbox[1]
return bbox
class _PanopticPrediction:
def __init__(self, panoptic_seg, segments_info):
self._seg = panoptic_seg
self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info
segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
areas = areas.numpy()
sorted_idxs = np.argsort(-areas)
self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
self._seg_ids = self._seg_ids.tolist()
for sid, area in zip(self._seg_ids, self._seg_areas):
if sid in self._sinfo:
self._sinfo[sid]["area"] = float(area)
def non_empty_mask(self):
"""
Returns:
(H, W) array, a mask for all pixels that have a prediction
"""
empty_ids = []
for id in self._seg_ids:
if id not in self._sinfo:
empty_ids.append(id)
if len(empty_ids) == 0:
return np.zeros(self._seg.shape, dtype=np.uint8)
assert (
len(empty_ids) == 1
), ">1 ids corresponds to no labels. This is currently not supported"
return (self._seg != empty_ids[0]).numpy().astype(np.bool)
def semantic_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or sinfo["isthing"]:
# Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
continue
yield (self._seg == sid).numpy().astype(np.bool), sinfo
def instance_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or not sinfo["isthing"]:
continue
mask = (self._seg == sid).numpy().astype(np.bool)
if mask.sum() > 0:
yield mask, sinfo
def _create_text_labels(classes, scores, class_names):
"""
Args:
classes (list[int] or None):
scores (list[float] or None):
class_names (list[str] or None):
Returns:
list[str] or None
"""
labels = None
if class_names is not None and len(class_names) > 1:
labels = [class_names[i] for i in classes]
if scores is not None:
if labels is None:
labels = ["{:.0f}%".format(s * 100) for s in scores]
else:
labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
return labels
class VisImage:
def __init__(self, img, scale=1.0):
"""
Args:
img (ndarray): an RGB image of shape (H, W, 3).
scale (float): scale the input image
"""
self.img = img
self.scale = scale
self.width, self.height = img.shape[1], img.shape[0]
self._setup_figure(img)
def _setup_figure(self, img):
"""
Args:
Same as in :meth:`__init__()`.
Returns:
fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
"""
fig = mplfigure.Figure(frameon=False)
self.dpi = fig.get_dpi()
# add a small 1e-2 to avoid precision lost due to matplotlib's truncation
# (https://github.com/matplotlib/matplotlib/issues/15363)
fig.set_size_inches(
(self.width * self.scale + 1e-2) / self.dpi,
(self.height * self.scale + 1e-2) / self.dpi,
)
self.canvas = FigureCanvasAgg(fig)
# self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
ax.axis("off")
ax.set_xlim(0.0, self.width)
ax.set_ylim(self.height)
self.fig = fig
self.ax = ax
def save(self, filepath):
"""
Args:
filepath (str): a string that contains the absolute path, including the file name, where
the visualized image will be saved.
"""
if filepath.endswith(".jpg") or filepath.endswith(".png"):
# faster than matplotlib's imshow
cv2.imwrite(filepath, self.get_image()[:, :, ::-1])
else:
# support general formats (e.g. pdf)
self.ax.imshow(self.img, interpolation="nearest")
self.fig.savefig(filepath)
def get_image(self):
"""
Returns:
ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type.
The shape is scaled w.r.t the input image using the given `scale` argument.
"""
canvas = self.canvas
s, (width, height) = canvas.print_to_buffer()
if (self.width, self.height) != (width, height):
img = cv2.resize(self.img, (width, height))
else:
img = self.img
# buf = io.BytesIO() # works for cairo backend
# canvas.print_rgba(buf)
# width, height = self.width, self.height
# s = buf.getvalue()
buffer = np.frombuffer(s, dtype="uint8")
# imshow is slow. blend manually (still quite slow)
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
try:
import numexpr as ne # fuse them with numexpr
visualized_image = ne.evaluate("img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)")
except ImportError:
alpha = alpha.astype("float32") / 255.0
visualized_image = img * (1 - alpha) + rgb * alpha
visualized_image = visualized_image.astype("uint8")
return visualized_image
class Visualizer:
def __init__(self, img_rgb, metadata, scale=1.0, instance_mode=ColorMode.IMAGE):
"""
Args:
img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
the height and width of the image respectively. C is the number of
color channels. The image is required to be in RGB format since that
is a requirement of the Matplotlib library. The image is also expected
to be in the range [0, 255].
metadata (MetadataCatalog): image metadata.
"""
self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
self.metadata = metadata
self.output = VisImage(self.img, scale=scale)
self.cpu_device = torch.device("cpu")
# too small texts are useless, therefore clamp to 9
self._default_font_size = max(
np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
)
self._instance_mode = instance_mode
def draw_instance_predictions(self, predictions):
"""
Draw instance-level prediction results on an image.
Args:
predictions (Instances): the output of an instance detection/segmentation
model. Following fields will be used to draw:
"pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
Returns:
output (VisImage): image object with visualizations.
"""
boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
scores = predictions.scores if predictions.has("scores") else None
classes = predictions.pred_classes if predictions.has("pred_classes") else None
labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
if predictions.has("pred_masks"):
masks = predictions.pred_masks.numpy()
masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
else:
masks = None
if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
colors = [
self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
]
alpha = 0.8
else:
colors = None
alpha = 0.5
if self._instance_mode == ColorMode.IMAGE_BW:
self.output.img = self._create_grayscale_image(
(predictions.pred_masks.any(dim=0) > 0).numpy()
)
alpha = 0.3
self.overlay_instances(
masks=masks,
boxes=boxes,
labels=labels,
keypoints=keypoints,
assigned_colors=colors,
alpha=alpha,
)
return self.output
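    # Illustrative call pattern (assumption: `image_rgb`, `metadata` and
    # `model_outputs` are hypothetical names, shaped like detectron2-style
    # inference objects):
    #     v = Visualizer(image_rgb, metadata)
    #     out = v.draw_instance_predictions(model_outputs["instances"].to("cpu"))
    #     rendered = out.get_image()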
def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
"""
Draw semantic segmentation predictions/labels.
Args:
sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
area_threshold (int): segments with less than `area_threshold` are not drawn.
alpha (float): the larger it is, the more opaque the segmentations are.
Returns:
output (VisImage): image object with visualizations.
"""
if isinstance(sem_seg, torch.Tensor):
sem_seg = sem_seg.numpy()
labels, areas = np.unique(sem_seg, return_counts=True)
sorted_idxs = np.argsort(-areas).tolist()
labels = labels[sorted_idxs]
for label in labels:
try:
mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
except (AttributeError, IndexError):
mask_color = None
binary_mask = (sem_seg == label).astype(np.uint8)
text = self.metadata.stuff_classes[label]
self.draw_binary_mask(
binary_mask,
color=mask_color,
edge_color=_OFF_WHITE,
text=text,
alpha=alpha,
area_threshold=area_threshold,
)
return self.output
def draw_panoptic_seg_predictions(
self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7
):
"""
Draw panoptic prediction results on an image.
Args:
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
Returns:
output (VisImage): image object with visualizations.
"""
pred = _PanopticPrediction(panoptic_seg, segments_info)
if self._instance_mode == ColorMode.IMAGE_BW:
self.output.img = self._create_grayscale_image(pred.non_empty_mask())
# draw mask for all semantic segments first i.e. "stuff"
for mask, sinfo in pred.semantic_masks():
category_idx = sinfo["category_id"]
try:
mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
except AttributeError:
mask_color = None
text = self.metadata.stuff_classes[category_idx]
self.draw_binary_mask(
mask,
color=mask_color,
edge_color=_OFF_WHITE,
text=text,
alpha=alpha,
area_threshold=area_threshold,
)
# draw mask for all instances second
all_instances = list(pred.instance_masks())
if len(all_instances) == 0:
return self.output
masks, sinfo = list(zip(*all_instances))
category_ids = [x["category_id"] for x in sinfo]
try:
scores = [x["score"] for x in sinfo]
except KeyError:
scores = None
labels = _create_text_labels(category_ids, scores, self.metadata.thing_classes)
try:
colors = [random_color(rgb=True, maximum=1) for k in category_ids]
except AttributeError:
colors = None
self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
return self.output
def draw_dataset_dict(self, dic):
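        # Draws ground-truth annotations from a dataset dict: XYXY-converted
        # boxes, optional polygon masks and keypoints, an optional "|crowd"
        # label suffix, and (if present) the semantic segmentation.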
annos = dic.get("annotations", None)
if annos:
if "segmentation" in annos[0]:
masks = [x["segmentation"] for x in annos]
else:
masks = None
if "keypoints" in annos[0]:
keypts = [x["keypoints"] for x in annos]
keypts = np.array(keypts).reshape(len(annos), -1, 3)
else:
keypts = None
boxes = [BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) for x in annos]
labels = [x["category_id"] for x in annos]
names = self.metadata.get("thing_classes", None)
if names:
labels = [names[i] for i in labels]
labels = [i + ("|crowd" if a.get("iscrowd", 0) else "") for i, a in zip(labels, annos)]
self.overlay_instances(labels=labels, boxes=boxes, masks=masks, keypoints=keypts)
sem_seg = dic.get("sem_seg", None)
if sem_seg is None and "sem_seg_file_name" in dic:
sem_seg = cv2.imread(dic["sem_seg_file_name"], cv2.IMREAD_GRAYSCALE)
if sem_seg is not None:
self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)
return self.output
def overlay_instances(
self,
*,
boxes=None,
labels=None,
masks=None,
keypoints=None,
assigned_colors=None,
alpha=0.5
):
"""
Args:
boxes (Boxes or ndarray): either a :class:`Boxes` or a Nx4 numpy array
of XYXY_ABS format for the N objects in a single image.
labels (list[str]): the text to be displayed for each instance.
masks (masks-like object): Supported types are:
* `structures.masks.PolygonMasks`, `structures.masks.BitMasks`.
* list[list[ndarray]]: contains the segmentation masks for all objects in one image.
The first level of the list corresponds to individual instances. The second
level to all the polygon that compose the instance, and the third level
to the polygon coordinates. The third level should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
* list[ndarray]: each ndarray is a binary mask of shape (H, W).
* list[dict]: each dict is a COCO-style RLE.
keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
where the N is the number of instances and K is the number of keypoints.
The last dimension corresponds to (x, y, visibility or score).
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = None
if boxes is not None:
boxes = self._convert_boxes(boxes)
num_instances = len(boxes)
if masks is not None:
masks = self._convert_masks(masks)
if num_instances:
assert len(masks) == num_instances
else:
num_instances = len(masks)
if keypoints is not None:
if num_instances:
assert len(keypoints) == num_instances
else:
num_instances = len(keypoints)
keypoints = self._convert_keypoints(keypoints)
if labels is not None:
assert len(labels) == num_instances
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
# Display in largest to smallest order to reduce occlusion.
areas = None
if boxes is not None:
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
elif masks is not None:
areas = np.asarray([x.area() for x in masks])
if areas is not None:
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs] if boxes is not None else None
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
keypoints = keypoints[sorted_idxs] if keypoints is not None else None
for i in range(num_instances):
color = assigned_colors[i]
if boxes is not None:
self.draw_box(boxes[i], edge_color=color)
if masks is not None:
for segment in masks[i].polygons:
self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
if labels is not None:
# first get a box
if boxes is not None:
x0, y0, x1, y1 = boxes[i]
text_pos = (x0, y0) # if drawing boxes, put text on the box corner.
horiz_align = "left"
elif masks is not None:
x0, y0, x1, y1 = masks[i].bbox()
# draw text in the center (defined by median) when box is not drawn
# median is less sensitive to outliers.
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = "center"
else:
continue # drawing the box confidence for keypoints isn't very useful.
# for small objects, draw text at the side to avoid occlusion
instance_area = (y1 - y0) * (x1 - x0)
if (
instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
or y1 - y0 < 40 * self.output.scale
):
if y1 >= self.output.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
* 0.5
* self._default_font_size
)
self.draw_text(
labels[i],
text_pos,
color=lighter_color,
horizontal_alignment=horiz_align,
font_size=font_size,
)
# draw keypoints
if keypoints is not None:
for keypoints_per_instance in keypoints:
self.draw_and_connect_keypoints(keypoints_per_instance)
return self.output
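    # Minimal direct use (illustrative; the box coordinates and label text
    # below are made up):
    #     v.overlay_instances(boxes=np.array([[10., 10., 80., 60.]]),
    #                         labels=["example 90%"])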
def draw_and_connect_keypoints(self, keypoints):
"""
Draws keypoints of an instance and follows the rules for keypoint connections
to draw lines between appropriate keypoints. This follows color heuristics for
line color.
Args:
keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
and the last dimension corresponds to (x, y, probability).
Returns:
output (VisImage): image object with visualizations.
"""
visible = {}
for idx, keypoint in enumerate(keypoints):
# draw keypoint
x, y, prob = keypoint
if prob > _KEYPOINT_THRESHOLD:
self.draw_circle((x, y), color=_RED)
keypoint_name = self.metadata.keypoint_names[idx]
visible[keypoint_name] = (x, y)
for kp0, kp1, color in self.metadata.keypoint_connection_rules:
if kp0 in visible and kp1 in visible:
x0, y0 = visible[kp0]
x1, y1 = visible[kp1]
color = tuple(x / 255.0 for x in color)
self.draw_line([x0, x1], [y0, y1], color=color)
# draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
# Note that this strategy is specific to person keypoints.
# For other keypoints, it should just do nothing
try:
ls_x, ls_y = visible["left_shoulder"]
rs_x, rs_y = visible["right_shoulder"]
mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
except KeyError:
pass
else:
# draw line from nose to mid-shoulder
nose_x, nose_y = visible.get("nose", (None, None))
if nose_x is not None:
self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)
try:
# draw line from mid-shoulder to mid-hip
lh_x, lh_y = visible["left_hip"]
rh_x, rh_y = visible["right_hip"]
except KeyError:
pass
else:
mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
return self.output
"""
Primitive drawing functions:
"""
def draw_text(
self, text, position, *, font_size=None, color="g", horizontal_alignment="center"
):
"""
Args:
text (str): class label
position (tuple): a tuple of the x and y coordinates to place text on image.
font_size (int, optional): font of the text. If not provided, a font size
proportional to the image width is calculated and used.
color: color of the text. Refer to `matplotlib.colors` for full list
of formats that are accepted.
horizontal_alignment (str): see `matplotlib.text.Text`
Returns:
output (VisImage): image object with text drawn.
"""
if not font_size:
font_size = self._default_font_size
# since the text background is dark, we don't want the text to be dark
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
x, y = position
self.output.ax.text(
x,
y,
text,
size=font_size * self.output.scale,
family="sans-serif",
bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
verticalalignment="top",
horizontalalignment=horizontal_alignment,
color=color,
zorder=10,
)
return self.output
def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
"""
Args:
box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
are the coordinates of the image's top left corner. x1 and y1 are the
coordinates of the image's bottom right corner.
            alpha (float): blending coefficient. Smaller values lead to a more
                transparent box outline.
edge_color: color of the outline of the box. Refer to `matplotlib.colors`
for full list of formats that are accepted.
line_style (string): the string to use to create the outline of the boxes.
Returns:
output (VisImage): image object with box drawn.
"""
x0, y0, x1, y1 = box_coord
width = x1 - x0
height = y1 - y0
linewidth = max(self._default_font_size / 4, 1)
self.output.ax.add_patch(
mpl.patches.Rectangle(
(x0, y0),
width,
height,
fill=False,
edgecolor=edge_color,
linewidth=linewidth * self.output.scale,
alpha=alpha,
linestyle=line_style,
)
)
return self.output
def draw_circle(self, circle_coord, color, radius=3):
"""
Args:
circle_coord (list(int) or tuple(int)): contains the x and y coordinates
of the center of the circle.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
radius (int): radius of the circle.
Returns:
output (VisImage): image object with box drawn.
"""
x, y = circle_coord
self.output.ax.add_patch(
mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
)
return self.output
def draw_line(self, x_data, y_data, color):
"""
Args:
x_data (list[int]): a list containing x values of all the points being drawn.
Length of list should match the length of y_data.
y_data (list[int]): a list containing y values of all the points being drawn.
Length of list should match the length of x_data.
color: color of the line. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
Returns:
output (VisImage): image object with line drawn.
"""
linewidth = max(self._default_font_size / 3, 1)
self.output.ax.add_line(
mpl.lines.Line2D(x_data, y_data, linewidth=linewidth * self.output.scale, color=color)
)
return self.output
def draw_binary_mask(
self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=4096
):
"""
Args:
binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
W is the image width. Each value in the array is either a 0 or 1 value of uint8
type.
color: color of the mask. Refer to `matplotlib.colors` for a full list of
formats that are accepted. If None, will pick a random color.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted.
            text (str): if not None, a label drawn on the largest connected
                components of the mask.
            alpha (float): blending coefficient. Smaller values lead to more
                transparent masks.
            area_threshold (float): a connected component smaller than this area
                will not be shown.
Returns:
output (VisImage): image object with mask drawn.
"""
if color is None:
color = random_color(rgb=True, maximum=1)
if area_threshold is None:
area_threshold = 4096
has_valid_segment = False
binary_mask = binary_mask.astype("uint8") # opencv needs uint8
mask = GenericMask(binary_mask, self.output.height, self.output.width)
shape2d = (binary_mask.shape[0], binary_mask.shape[1])
if not mask.has_holes:
# draw polygons for regular masks
for segment in mask.polygons:
area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
if area < area_threshold:
continue
has_valid_segment = True
segment = segment.reshape(-1, 2)
self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
else:
rgba = np.zeros(shape2d + (4,), dtype="float32")
rgba[:, :, :3] = color
rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
has_valid_segment = True
self.output.ax.imshow(rgba)
if text is not None and has_valid_segment:
# TODO sometimes drawn on wrong objects. the heuristics here can improve.
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
_num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
largest_component_id = np.argmax(stats[1:, -1]) + 1
# draw text on the largest component, as well as other very large components.
for cid in range(1, _num_cc):
if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
# median is more stable than centroid
# center = centroids[largest_component_id]
center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
self.draw_text(text, center, color=lighter_color)
return self.output
def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
"""
Args:
segment: numpy array of shape Nx2, containing all the points in the polygon.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted. If not provided, a darker shade
of the polygon color will be used instead.
            alpha (float): blending coefficient. Smaller values lead to more
                transparent polygons.
Returns:
output (VisImage): image object with polygon drawn.
"""
if edge_color is None:
# make edge color darker than the polygon color
if alpha > 0.8:
edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
else:
edge_color = color
edge_color = mplc.to_rgb(edge_color) + (1,)
polygon = mpl.patches.Polygon(
segment,
fill=True,
facecolor=mplc.to_rgb(color) + (alpha,),
edgecolor=edge_color,
linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
)
self.output.ax.add_patch(polygon)
return self.output
"""
Internal methods:
"""
def _jitter(self, color):
"""
Randomly modifies given color to produce a slightly different color than the color given.
Args:
color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
picked. The values in the list are in the [0.0, 1.0] range.
Returns:
jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
color after being jittered. The values in the list are in the [0.0, 1.0] range.
"""
color = mplc.to_rgb(color)
vec = np.random.rand(3)
# better to do it in another color space
vec = vec / np.linalg.norm(vec) * 0.5
res = np.clip(vec + color, 0, 1)
return tuple(res)
def _create_grayscale_image(self, mask=None):
"""
Create a grayscale version of the original image.
The colors in masked area, if given, will be kept.
"""
img_bw = self.img.astype("f4").mean(axis=2)
img_bw = np.stack([img_bw] * 3, axis=2)
if mask is not None:
img_bw[mask] = self.img[mask]
return img_bw
def _change_color_brightness(self, color, brightness_factor):
"""
Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
less or more saturation than the original color.
Args:
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
0 will correspond to no change, a factor in [-1.0, 0) range will result in
a darker color and a factor in (0, 1.0] range will result in a lighter color.
Returns:
modified_color (tuple[double]): a tuple containing the RGB values of the
modified color. Each value in the tuple is in the [0.0, 1.0] range.
"""
assert brightness_factor >= -1.0 and brightness_factor <= 1.0
        color = mplc.to_rgb(color)
        polygon_color = colorsys.rgb_to_hls(*color)  # `color` is already an RGB tuple here
modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
return modified_color
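    # Example (illustrative): brightness_factor=0.7 scales the HLS lightness
    # up by 70% (clamped to [0, 1]); overlay_instances and draw_binary_mask use
    # exactly this call to derive lighter label-text colors from instance colors.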
def _convert_boxes(self, boxes):
"""
Convert different format of boxes to a Nx4 array.
"""
if isinstance(boxes, Boxes):
return boxes.tensor.numpy()
else:
return np.asarray(boxes)
def _convert_masks(self, masks_or_polygons):
"""
Convert different format of masks or polygons to a tuple of masks and polygons.
Returns:
list[GenericMask]:
"""
m = masks_or_polygons
if isinstance(m, PolygonMasks):
m = m.polygons
if isinstance(m, BitMasks):
m = m.tensor.numpy()
if isinstance(m, torch.Tensor):
m = m.numpy()
ret = []
for x in m:
if isinstance(x, GenericMask):
ret.append(x)
else:
ret.append(GenericMask(x, self.output.height, self.output.width))
return ret
def _convert_keypoints(self, keypoints):
if isinstance(keypoints, Keypoints):
keypoints = keypoints.tensor
keypoints = np.asarray(keypoints)
return keypoints
def get_output(self):
"""
Returns:
output (VisImage): the image output containing the visualizations added
to the image.
"""
return self.output
| [
"[email protected]"
] | |
7fea7b0f0a2dc5011bde9c1504e953d155324690 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stubs/oauthlib/oauthlib/oauth2/rfc6749/grant_types/base.pyi | ba8171b0111fc0f9461d642587b7be5a8fd91df1 | [
"Apache-2.0",
"MIT"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 1,127 | pyi | from typing import Any
log: Any
class ValidatorsContainer:
pre_auth: Any
post_auth: Any
pre_token: Any
post_token: Any
def __init__(self, post_auth, post_token, pre_auth, pre_token) -> None: ...
@property
def all_pre(self): ...
@property
def all_post(self): ...
class GrantTypeBase:
error_uri: Any
request_validator: Any
default_response_mode: str
refresh_token: bool
response_types: Any
def __init__(self, request_validator: Any | None = ..., **kwargs) -> None: ...
def register_response_type(self, response_type) -> None: ...
def register_code_modifier(self, modifier) -> None: ...
def register_token_modifier(self, modifier) -> None: ...
def create_authorization_response(self, request, token_handler) -> None: ...
def create_token_response(self, request, token_handler) -> None: ...
def add_token(self, token, token_handler, request): ...
def validate_grant_type(self, request) -> None: ...
def validate_scopes(self, request) -> None: ...
def prepare_authorization_response(self, request, token, headers, body, status): ...
| [
"[email protected]"
] | |
6eb33391091d6da8cb07157f81214fd33aad700a | 1424812c4f211d3d5e356e8b3889a689162062f3 | /arcade/python/65_calc_bonuses.py | f7049b69442fdc3417020fec21d30b0c0719e3bf | [] | no_license | nazomeku/codefights | cb7d3c40be0809695ec524a87c88dbebcf5b47bc | b23f6816f9b5b0720feac1c49c31163923e0a554 | refs/heads/master | 2021-01-22T12:49:35.905165 | 2017-11-21T19:03:37 | 2017-11-21T19:03:37 | 102,357,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | """Given the bonuses the player got, your task is to return his final
score for the level."""
def calc_bonuses(bonuses, n):
it = (x for x in bonuses)
res = 0
try:
for _ in range(n):
res += next(it)
except StopIteration:
res = 0
return res
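# Worked example (illustrative): calc_bonuses([5, 5, 10], 2) consumes the first
# two bonuses and returns 10; if n exceeds len(bonuses), the StopIteration
# handler resets the result to 0.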
| [
"[email protected]"
] | |
a68323c8d34f11fe9205e298a3142449f9f35bd1 | 999879f8d18e041d7fa313132408b252aded47f8 | /01-codes/scipy-master/benchmarks/benchmarks/go_benchmark_functions/go_funcs_Z.py | 948d35ef731d2bb98ed71725b8de543e3329add0 | [
"MIT"
] | permissive | QPanProjects/Surrogate-Model | ebcaf05728e82dcbcd924c2edca1b490ab085173 | 848c7128201218b0819c9665e2cec72e3b1d29ac | refs/heads/master | 2022-10-11T19:03:55.224257 | 2020-06-09T14:37:35 | 2020-06-09T14:37:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,799 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, sum, sign, arange
from .go_benchmark import Benchmark
class Zacharov(Benchmark):
r"""
Zacharov objective function.
This class defines the Zacharov [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Zacharov}}(x) = \sum_{i=1}^{n} x_i^2 + \left ( \frac{1}{2}
\sum_{i=1}^{n} i x_i \right )^2
+ \left ( \frac{1}{2} \sum_{i=1}^{n} i x_i
\right )^4
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))
self.custom_bounds = ([-1, 1], [-1, 1])
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
u = sum(x ** 2)
v = sum(arange(1, self.N + 1) * x)
return u + (0.5 * v) ** 2 + (0.5 * v) ** 4
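        # Sanity check (illustrative): at x = 0 every term vanishes, so
        # fun(zeros) == 0.0 == self.fglob.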
class ZeroSum(Benchmark):
r"""
ZeroSum objective function.
This class defines the ZeroSum [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{ZeroSum}}(x) = \begin{cases}
0 & \textrm{if} \sum_{i=1}^n x_i = 0 \\
1 + \left(10000 \left |\sum_{i=1}^n x_i\right|
\right)^{0.5} & \textrm{otherwise}
\end{cases}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` where :math:`\sum_{i=1}^n x_i = 0`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
if abs(sum(x)) < 3e-16:
return 0.0
return 1.0 + (10000.0 * abs(sum(x))) ** 0.5
class Zettl(Benchmark):
r"""
Zettl objective function.
This class defines the Zettl [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Zettl}}(x) = \frac{1}{4} x_{1} + \left(x_{1}^{2} - 2 x_{1}
+ x_{2}^{2}\right)^{2}
    with :math:`x_i \in [-5, 10]` for :math:`i = 1, 2` (as implemented below;
    the benchmark is often quoted on :math:`[-1, 5]`).
*Global optimum*: :math:`f(x) = -0.0037912` for :math:`x = [-0.029896, 0.0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-0.02989597760285287, 0.0]]
self.fglob = -0.003791237220468656
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 + x[1] ** 2 - 2 * x[0]) ** 2 + 0.25 * x[0]
class Zimmerman(Benchmark):
r"""
Zimmerman objective function.
This class defines the Zimmerman [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Zimmerman}}(x) = \max \left[Zh1(x), Zp(Zh2(x))
\textrm{sgn}(Zh2(x)), Zp(Zh3(x))
\textrm{sgn}(Zh3(x)),
Zp(-x_1)\textrm{sgn}(x_1),
Zp(-x_2)\textrm{sgn}(x_2) \right]
Where, in this exercise:
.. math::
\begin{cases}
Zh1(x) = 9 - x_1 - x_2 \\
        Zh2(x) = (x_1 - 3)^2 + (x_2 - 2)^2 - 16 \\
Zh3(x) = x_1x_2 - 14 \\
Zp(t) = 100(1 + t)
\end{cases}
Where :math:`x` is a vector and :math:`t` is a scalar.
Here, :math:`x_i \in [0, 100]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [7, 2]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO implementation from Gavana
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [100.0] * self.N))
self.custom_bounds = ([0.0, 8.0], [0.0, 8.0])
self.global_optimum = [[7.0, 2.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
Zh1 = lambda x: 9.0 - x[0] - x[1]
Zh2 = lambda x: (x[0] - 3.0) ** 2.0 + (x[1] - 2.0) ** 2.0 - 16.0
Zh3 = lambda x: x[0] * x[1] - 14.0
Zp = lambda x: 100.0 * (1.0 + x)
return max(Zh1(x),
Zp(Zh2(x)) * sign(Zh2(x)),
Zp(Zh3(x)) * sign(Zh3(x)),
Zp(-x[0]) * sign(x[0]),
Zp(-x[1]) * sign(x[1]))
class Zirilli(Benchmark):
r"""
    Zirilli objective function.
This class defines the Zirilli [1]_ global optimization problem. This is a
unimodal minimization problem defined as follows:
.. math::
f_{\text{Zirilli}}(x) = 0.25x_1^4 - 0.5x_1^2 + 0.1x_1 + 0.5x_2^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.3523` for :math:`x = [-1.0465, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])
self.global_optimum = [[-1.0465, 0.0]]
self.fglob = -0.35238603
def fun(self, x, *args):
self.nfev += 1
return 0.25 * x[0] ** 4 - 0.5 * x[0] ** 2 + 0.1 * x[0] + 0.5 * x[1] ** 2
| [
"[email protected]"
] | |
3b2216beb520976143f093e59125545a5bcd1e06 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_045/ch86_2019_06_06_19_48_22_074445.py | cf67642da9ca8f73290bdf4b1207220f46923a28 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | with open('dados.csv','r') as arquivo:
t=arquivo.replace(',','')
with open('dados.tsv','a') as arquivo:
arquivo=t | [
"[email protected]"
] | |
9324e5dad5c5d41d9712524e5664e5b589a4683a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02735/s229136257.py | 7525f09b2d7be32d9b898770a7896fc24d111e97 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | H,W = map(int,input().split())
s = []
for i in range(H):
S = list(input())
s.append(S)
dis = [[float("inf")] * W for i in range(H)]
if s[0][0] == "#":
dis[0][0] = 1
else:
dis[0][0] = 0
for i in range(H):
for j in range(W):
if i != H-1:
if s[i][j] == "." and s[i+1][j] == "#":
dis[i+1][j] = min(dis[i][j] + 1,dis[i+1][j])
else:
dis[i+1][j] = min(dis[i][j],dis[i+1][j])
if j != W-1:
if s[i][j] == "." and s[i][j+1] == "#":
dis[i][j+1] = min(dis[i][j] + 1,dis[i][j+1])
else:
dis[i][j+1] = min(dis[i][j],dis[i][j+1])
print(dis[-1][-1])
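# Reading note (illustrative): the relaxation above is a DP over monotone
# (down/right) paths; stepping from a '.' cell into a '#' cell costs 1, so
# dis[-1][-1] is the minimum number of '#'-runs entered on any path from
# (0, 0) to (H-1, W-1).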
| [
"[email protected]"
] | |
9413a725aad2bb388d989390be97a3ad761e3fa6 | be1545a48c113cc497340a9d68ec05a9e8eedbe1 | /controller/access/models.py | 19cbb8274d16ffe408e5f9e7fb065812c1aa491b | [
"Apache-2.0"
] | permissive | helena-project/beetle | 318d82691391247c6542eb8067b094907cce0cde | 6f07d864c38ea6aed962263eca20ecf8436cfb4e | refs/heads/master | 2021-01-18T04:00:55.208424 | 2017-02-04T16:30:25 | 2017-02-04T16:30:25 | 59,785,058 | 16 | 2 | null | 2016-07-09T06:15:43 | 2016-05-26T21:41:51 | C++ | UTF-8 | Python | false | false | 9,348 | py | from __future__ import unicode_literals
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from django.db import models
from django.utils import timezone
from polymorphic.models import PolymorphicModel
from passlib.apps import django_context as pwd_context
from main.constants import NUM_HIGH_PRIORITY_LEVELS
# Create your models here.
def default_expire(self=None):
return timezone.now() + relativedelta(years=1)
class Rule(models.Model):
"""A rule specifying when a GATT server and client may communicate"""
# human searchable name
name = models.CharField(
max_length=100,
unique=True,
help_text="A human readable rule for searching and indexing.")
description = models.CharField(
max_length=500,
default="",
blank=True,
help_text="Description of the rule.")
# fields queried using SQL
service = models.ForeignKey(
"gatt.Service",
default="")
characteristic = models.ForeignKey(
"gatt.Characteristic",
default="")
from_principal = models.ForeignKey(
"beetle.Principal",
related_name="rule_from",
verbose_name="Server Principal",
help_text="Application or peripheral acting as server.")
from_gateway = models.ForeignKey(
"beetle.Gateway",
related_name="rule_from_gateway",
help_text="Gateway connected to server.")
to_principal = models.ForeignKey(
"beetle.Principal",
related_name="rule_to",
verbose_name="Client Principal",
help_text="Application or peripheral acting as client.")
to_gateway = models.ForeignKey(
"beetle.Gateway",
related_name="rule_to_gateway",
help_text="Gateway connected to client.")
PRIORITY_CHOICES = ((0, "Normal"),) + tuple((i, "High-%d" % i) for \
i in xrange(1, NUM_HIGH_PRIORITY_LEVELS+1))
priority = models.IntegerField(
default=0,
choices=PRIORITY_CHOICES)
# fields verified programatically
cron_expression = models.CharField(
max_length=100,
default="* * * * *",
verbose_name="Cron",
help_text="Standard crontab expression for when rule applies. "
+ "Format: Min Hour Day-of-Month Month Day-of-Week")
# permissions and connection information
properties = models.CharField(
max_length=100,
blank=True,
default="brwni",
verbose_name="Props",
help_text="Hint: brwni (broadcast, read, write, notify, indicate)")
exclusive = models.ForeignKey("Exclusive",
default=None,
null=True,
blank=True,
verbose_name="Excl",
help_text="Identifier to enforce exclusive access under.")
integrity = models.BooleanField(
default=True,
verbose_name="MAC",
help_text="Link layer integrity required.")
encryption = models.BooleanField(
default=True,
verbose_name="ENC",
help_text="Link layer encryption required.")
lease_duration = models.DurationField(
default=timedelta(minutes=15),
verbose_name="Lease",
help_text="Maximum amount of time results may be cached. Hint: HH:mm:ss")
# administrative fields
start = models.DateTimeField(
default=timezone.now)
expire = models.DateTimeField(
default=default_expire)
active = models.BooleanField(
default=True,
help_text="Rule will be considered?")
def static_lte(self, rhs):
"""
Returns whether self is more specific than rhs.
"""
def _lte(a, b):
return a.name == b.name or b.name == "*"
def _eq(a, b):
return a.name == b.name
def _lte_principal(a, b):
if a == b:
return True
if isinstance(b, PrincipalGroup):
if isinstance(a, VirtualDevice):
if b.members.filter(name=a.name).exists():
return True
else:
# subset?
pass
if b.name == "*":
return True
return False
def _lte_gateway(a, b):
if a == b:
return True
if isinstance(b, GatewayGroup):
if isinstance(a, BeetleGateway):
if b.members.filter(name=a.name).exists():
return True
else:
# subset?
pass
if b.name == "*":
return True
return False
if self.priority <= rhs.priority:
return True
svc_char_lte = False
if _lte(self.characteristic, rhs.characteristic):
if not _eq(self.characteristic, rhs.characteristic):
svc_char_lte = True
else:
svc_char_lte = _lte(self.service, rhs.service)
from_lte = False
if _lte_principal(self.from_principal, rhs.from_principal):
if not self.from_principal == rhs.from_principal:
from_lte = True
else:
from_lte = _lte_gateway(self.from_gateway, rhs.from_gateway)
to_lte = False
if _lte_principal(self.to_principal, rhs.to_principal):
if not self.to_principal == rhs.to_principal:
to_lte = True
else:
to_lte = _lte_gateway(self.to_gateway, rhs.to_gateway)
return svc_char_lte and from_lte and to_lte
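    # Reading guide (illustrative): a.static_lte(b) is True when `a` is at
    # least as specific as `b` on every axis compared above (priority,
    # service/characteristic, from/to principal and gateway), where "*" and
    # group membership make `b` count as broader.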
def __unicode__(self):
return self.name
class RuleException(models.Model):
"""Deny, instead of allow, access. Used for attenuating existing rules."""
rule = models.ForeignKey(
"Rule",
help_text="Rule to invert")
from_principal = models.ForeignKey(
"beetle.Principal",
related_name="except_from",
verbose_name="Server Principal",
help_text="Application or peripheral acting as server.")
from_gateway = models.ForeignKey(
"beetle.Gateway",
related_name="except_from_gateway",
help_text="Gateway connected to server.")
to_principal = models.ForeignKey(
"beetle.Principal",
related_name="except_to",
verbose_name="Client Principal",
help_text="Application or peripheral acting as client.")
to_gateway = models.ForeignKey(
"beetle.Gateway",
related_name="except_to_gateway",
help_text="Gateway connected to client.")
def __unicode__(self):
return "(except) %s" % self.rule
class Exclusive(models.Model):
"""Group rules by exclusive access."""
NULL = -1
class Meta:
verbose_name_plural = "Exclusive"
description = models.CharField(
max_length=200,
blank=True,
help_text="Logical description of this group.")
default_lease = models.DurationField(
default=timedelta(hours=1),
help_text="Length of the lease. Hint: HH:mm:ss")
def __unicode__(self):
return self.description
class DynamicAuth(PolymorphicModel):
"""Base class for dynamic rules."""
class Meta:
verbose_name_plural = "Dynamic Auth"
ON_MAP = 1
ON_ACCESS = 2
REQUIRE_WHEN_CHOICES = (
(ON_MAP, "map"),
(ON_ACCESS, "access"),
)
rule = models.ForeignKey("Rule")
session_length = models.DurationField(
default=timedelta(hours=1),
help_text="Time before reauthentication. Hint: HH:mm:ss")
require_when = models.IntegerField(
default=ON_MAP,
choices=REQUIRE_WHEN_CHOICES,
help_text="When to trigger authentication.")
priority = models.IntegerField(
default=0,
editable=False,
help_text="A hidden field to ensure evaluation order")
class AdminAuth(DynamicAuth):
"""Prompt the admin for permission."""
class Meta:
verbose_name = "Admin Authorization"
verbose_name_plural = verbose_name
RULE_SCOPE = 1
SERVER_SCOPE = 2
SCOPE_CHOICES = (
(RULE_SCOPE, "rule"),
(SERVER_SCOPE, "server"),
)
message = models.CharField(
max_length=100,
blank=True,
help_text="Any additional message to present to the admin.")
admin = models.ForeignKey("beetle.Contact",
help_text="User with authority over this rule")
scope = models.IntegerField(
choices=SCOPE_CHOICES,
default=RULE_SCOPE,
help_text="Scope of the permission granted.")
def save(self, *args, **kwargs):
self.priority = 4
super(AdminAuth, self).save(*args, **kwargs)
def __unicode__(self):
return ""
class UserAuth(DynamicAuth):
"""Prompt the user for permission."""
class Meta:
verbose_name = "User Authentication"
verbose_name_plural = verbose_name
RULE_SCOPE = 1
SERVER_SCOPE = 2
SCOPE_CHOICES = (
(RULE_SCOPE, "rule"),
(SERVER_SCOPE, "server"),
)
message = models.CharField(
max_length=100,
blank=True,
help_text="Any additional message to present to the user.")
scope = models.IntegerField(
choices=SCOPE_CHOICES,
default=RULE_SCOPE,
help_text="Scope of the permission granted.")
def save(self, *args, **kwargs):
self.priority = 3
super(UserAuth, self).save(*args, **kwargs)
def __unicode__(self):
return ""
class PasscodeAuth(DynamicAuth):
"""Prompt user for a passcode."""
class Meta:
verbose_name = "Passcode Authentication"
verbose_name_plural = verbose_name
code = models.CharField(
max_length=200,
blank=True,
help_text="Enter a passcode for this rule.")
chash = models.CharField(
max_length=200,
blank=True,
editable=False,
help_text="Hashed passcode.")
hint = models.CharField(
max_length=500,
blank=True,
help_text="Passcode hint.")
def save(self, *args, **kwargs):
if self.code != "" and set(self.code) != set('*'):
self.chash = pwd_context.encrypt(self.code)
self.code = "*" * len(self.code)
elif self.code == "":
self.chash = ""
self.priority = 2
super(PasscodeAuth, self).save(*args, **kwargs)
def __unicode__(self):
return ""
class NetworkAuth(DynamicAuth):
"""Is the client from a specific IP or subnet."""
class Meta:
verbose_name = "Network Requirement"
verbose_name_plural = verbose_name
is_private = models.BooleanField(
default=False,
help_text="Allow access from any private IP.")
ip_address = models.CharField(
max_length=45,
default="127.0.0.1",
help_text="IP address to be matched exactly.")
def save(self, *args, **kwargs):
self.priority = 1
super(NetworkAuth, self).save(*args, **kwargs)
def __unicode__(self):
return self.ip_address
| [
"[email protected]"
] | |
e7c530faea76708714903acd3aee7445bd19cd73 | 6bf336bc8d6ba061e0c707bdd8595368dee4d27b | /algorithms/implementation/chocolate_feast.py | 167c71bac7ce61c4730dfdc418b5534bdd26088e | [
"MIT"
] | permissive | avenet/hackerrank | aa536214dbccf5a822a30ea226e1dbaac9afb243 | e522030a023af4ff50d5fc64bd3eba30144e006c | refs/heads/master | 2021-01-01T20:15:06.647873 | 2017-11-24T23:59:19 | 2017-11-24T23:59:19 | 98,801,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | n = int(input())
for i in range(n):
money, item_price, exchange_wrapper = [int(x) for x in input().split(' ')]
bought = money // item_price
answer = bought
wrappers = bought
while wrappers >= exchange_wrapper:
extra_items = wrappers // exchange_wrapper
answer += extra_items
wrappers = (wrappers % exchange_wrapper) + extra_items
print(int(answer))
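# Worked example (illustrative): money=10, item_price=2, exchange_wrapper=5
# buys 5 bars, then 5 wrappers trade for 1 more bar, so the program prints 6.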
| [
"[email protected]"
] | |
f9f5c7d65ca52302442ac3af9842e3e0c2658298 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4042/261004042.py | 270e503879fc4b824982174d8abaee91802da7e1 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 2,478 | py | from bots.botsconfig import *
from records004042 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'ME',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'TRN', MIN: 0, MAX: 1},
{ID: 'N1', MIN: 0, MAX: 3, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 10},
]},
{ID: 'LX', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'AM1', MIN: 0, MAX: 99999},
{ID: 'DTP', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'LN1', MIN: 0, MAX: 99999},
{ID: 'AMT', MIN: 0, MAX: 99999},
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'PWK', MIN: 0, MAX: 99999},
{ID: 'NTE', MIN: 0, MAX: 99999},
{ID: 'NM1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 10},
{ID: 'DTP', MIN: 0, MAX: 2},
]},
{ID: 'NX1', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'NX2', MIN: 1, MAX: 99999},
{ID: 'DTP', MIN: 1, MAX: 7},
{ID: 'YNQ', MIN: 0, MAX: 16},
{ID: 'REF', MIN: 0, MAX: 5},
{ID: 'PDS', MIN: 0, MAX: 99999},
{ID: 'PDE', MIN: 0, MAX: 99999},
{ID: 'PEX', MIN: 0, MAX: 5},
{ID: 'REC', MIN: 0, MAX: 1},
{ID: 'REA', MIN: 0, MAX: 1},
{ID: 'III', MIN: 0, MAX: 30},
{ID: 'AM1', MIN: 0, MAX: 99999},
{ID: 'API', MIN: 0, MAX: 10},
{ID: 'AMT', MIN: 0, MAX: 10},
{ID: 'QTY', MIN: 0, MAX: 10},
{ID: 'PCT', MIN: 0, MAX: 4},
{ID: 'NTE', MIN: 0, MAX: 10},
{ID: 'PWK', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 2},
]},
{ID: 'IN1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'IN2', MIN: 0, MAX: 10},
{ID: 'III', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'PER', MIN: 0, MAX: 2},
{ID: 'DTM', MIN: 0, MAX: 2},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
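# Reading note (illustrative): each dict names an X12 segment with its MIN/MAX
# occurrence counts, and a LEVEL key nests the segments that loop under it, so
# the tree above describes one ST..SE transaction set for this 261 grammar.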
| [
"[email protected]"
] | |
2e93b69a71388f075c9082fa5e2e275073673eb8 | edeeb2fa2ece9f2f19a792bedbf842319e90e417 | /model/model.py | d77c10ee6a5a18179c282acd64d92cee86ba17fa | [] | no_license | ywl0911/TextCnn | 9461023cba84b24cf46d79c42720822baebe0c4c | 15af406ba05c71f9d18929ecd6d958216a1b53c2 | refs/heads/master | 2021-02-06T07:14:55.221200 | 2018-12-15T00:30:09 | 2018-12-15T00:30:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,474 | py | from __future__ import print_function
from model.text_cnn import TextCnn
import tensorflow as tf
from tensorflow.contrib import layers
import json
import os
path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
config_path = os.path.join(path, 'config')
params_path = os.path.join(config_path, 'kaggle_movie_review.json')
with open(params_path, 'r') as fin:
options = json.load(fin)
config = tf.contrib.training.HParams(**options)
class Model:
def __init__(self):
pass
def model_fn(self, mode, features, labels, params):
self.dtype = tf.float32
self.mode = mode
self.params = params
self.loss, self.train_op, self.metrics, self.predictions = None, None, None, None
self._init_placeholder(features, labels)
self.build_graph()
# train mode: required loss and train_op
# eval mode: required loss
# predict mode: required predictions
return tf.estimator.EstimatorSpec(
mode=mode,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=self.metrics,
predictions={"prediction": self.predictions})
def _init_placeholder(self, features, labels):
self.input_data = features
if type(features) == dict:
self.input_data = features["input_data"]
self.targets = labels
def build_graph(self):
graph = TextCnn(self.mode)
output = graph.build(self.input_data)
self._build_prediction(output)
if self.mode != tf.estimator.ModeKeys.PREDICT:
self._build_loss(output)
self._build_optimizer()
self._build_metric()
def _build_loss(self, output):
self.loss = tf.losses.softmax_cross_entropy(
self.targets,
output,
scope="loss")
def _build_prediction(self, output):
tf.argmax(output[0], name='train/pred_0') # for print_verbose
self.predictions = tf.argmax(output, axis=1)
def _build_optimizer(self):
self.train_op = layers.optimize_loss(
self.loss, tf.train.get_global_step(),
optimizer='Adam',
learning_rate=config.train['learning_rate'],
summaries=['loss', 'learning_rate'],
name="train_op")
def _build_metric(self):
self.metrics = {
"accuracy": tf.metrics.accuracy(tf.argmax(self.targets, axis=1), self.predictions)
}
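# Usage sketch (illustrative; `my_input_fn` is a hypothetical input function,
# not taken from this repo):
#     model = Model()
#     estimator = tf.estimator.Estimator(model_fn=model.model_fn, params=options)
#     estimator.train(input_fn=my_input_fn)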
| [
"[email protected]"
] | |
ef769839a59990c6826e9653ae4644fea223c00c | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17r_2_00/protocol/spanning_tree/pvst/timer_config/__init__.py | 8ed3d677f21d649bba233810117b6f647a2c7bc7 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,366 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class timer_config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /protocol/spanning-tree/pvst/timer-config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__hello_time','__forward_delay','__max_age',)
_yang_name = 'timer-config'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__max_age = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'6..40']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(20), is_leaf=True, yang_name="max-age", rest_name="max-age", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the max age for the spanning tree'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
self.__forward_delay = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'4..30']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(15), is_leaf=True, yang_name="forward-delay", rest_name="forward-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the forward delay for the spanning tree'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
self.__hello_time = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..10']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(2), is_leaf=True, yang_name="hello-time", rest_name="hello-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the hello interval for the spanning tree'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'protocol', u'spanning-tree', u'pvst', u'timer-config']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'protocol', u'spanning-tree', u'pvst']
def _get_hello_time(self):
"""
Getter method for hello_time, mapped from YANG variable /protocol/spanning_tree/pvst/timer_config/hello_time (uint32)
"""
return self.__hello_time
def _set_hello_time(self, v, load=False):
"""
Setter method for hello_time, mapped from YANG variable /protocol/spanning_tree/pvst/timer_config/hello_time (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_hello_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hello_time() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..10']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(2), is_leaf=True, yang_name="hello-time", rest_name="hello-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the hello interval for the spanning tree'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """hello_time must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..10']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(2), is_leaf=True, yang_name="hello-time", rest_name="hello-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the hello interval for the spanning tree'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)""",
})
self.__hello_time = t
if hasattr(self, '_set'):
self._set()
def _unset_hello_time(self):
self.__hello_time = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..10']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(2), is_leaf=True, yang_name="hello-time", rest_name="hello-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the hello interval for the spanning tree'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
def _get_forward_delay(self):
"""
Getter method for forward_delay, mapped from YANG variable /protocol/spanning_tree/pvst/timer_config/forward_delay (uint32)
"""
return self.__forward_delay
def _set_forward_delay(self, v, load=False):
"""
Setter method for forward_delay, mapped from YANG variable /protocol/spanning_tree/pvst/timer_config/forward_delay (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_forward_delay is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_forward_delay() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'4..30']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(15), is_leaf=True, yang_name="forward-delay", rest_name="forward-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the forward delay for the spanning tree'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """forward_delay must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'4..30']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(15), is_leaf=True, yang_name="forward-delay", rest_name="forward-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the forward delay for the spanning tree'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)""",
})
self.__forward_delay = t
if hasattr(self, '_set'):
self._set()
def _unset_forward_delay(self):
self.__forward_delay = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'4..30']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(15), is_leaf=True, yang_name="forward-delay", rest_name="forward-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the forward delay for the spanning tree'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
def _get_max_age(self):
"""
Getter method for max_age, mapped from YANG variable /protocol/spanning_tree/pvst/timer_config/max_age (uint32)
"""
return self.__max_age
def _set_max_age(self, v, load=False):
"""
Setter method for max_age, mapped from YANG variable /protocol/spanning_tree/pvst/timer_config/max_age (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_age is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_age() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'6..40']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(20), is_leaf=True, yang_name="max-age", rest_name="max-age", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the max age for the spanning tree'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """max_age must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'6..40']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(20), is_leaf=True, yang_name="max-age", rest_name="max-age", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the max age for the spanning tree'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)""",
})
self.__max_age = t
if hasattr(self, '_set'):
self._set()
def _unset_max_age(self):
self.__max_age = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'6..40']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(20), is_leaf=True, yang_name="max-age", rest_name="max-age", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the max age for the spanning tree'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
hello_time = __builtin__.property(_get_hello_time, _set_hello_time)
forward_delay = __builtin__.property(_get_forward_delay, _set_forward_delay)
max_age = __builtin__.property(_get_max_age, _set_max_age)
_pyangbind_elements = {'hello_time': hello_time, 'forward_delay': forward_delay, 'max_age': max_age, }
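# Usage sketch (hypothetical, comment-only -- the enclosing generated class is
# not visible in this fragment): pyangbind exposes the leaves above as plain
# Python properties whose setters enforce the YANG range restrictions, e.g.
#
#   cfg = timer_config()        # generated class for .../pvst/timer_config
#   cfg.hello_time = 2          # ok: within 1..10
#   cfg.forward_delay = 15      # ok: within 4..30
#   cfg.max_age = 99            # raises ValueError: outside 6..40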
| [
"[email protected]"
] | |
024c1ce5c3908c2e84353f3e0600a60e88bec43e | 59aed92059824381f34e1585e9600878d91cadb0 | /supervised_learning/0x0D-RNNs/5-bi_forward.py | 3bea5f82bfbcd5608895cf8bd8aabce38d00fccf | [] | no_license | jdarangop/holbertonschool-machine_learning | 3509eaa6d191f6887be9bbbb5a1df565b0b51cc8 | 75274394adb52d740f6cd4000cc00bbde44b9b72 | refs/heads/master | 2021-05-17T04:51:44.675655 | 2020-12-02T02:52:31 | 2020-12-02T02:52:31 | 250,633,089 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | #!/usr/bin/env python3
""" Bidirectional Cell Forward """
import numpy as np
class BidirectionalCell(object):
""" BidirectionalCell class. """
def __init__(self, i, h, o):
""" Initializer.
Args:
i: the dimensionality of the data.
h: the dimensionality of the hidden states.
o: the dimensionality of the outputs.
"""
self.Whf = np.random.normal(size=(i + h, h))
self.bhf = np.zeros((1, h))
self.Whb = np.random.normal(size=(i + h, h))
self.bhb = np.zeros((1, h))
self.Wy = np.random.normal(size=(i + h + o, o))
self.by = np.zeros((1, o))
def forward(self, h_prev, x_t):
""" calculates the hidden state in the forward
direction for one time step.
Args:
            h_prev: (numpy.ndarray) the previous hidden state.
            x_t: (numpy.ndarray) the data input for the cell.
Returns:
h_next: is the next hidden state.
"""
X = np.concatenate((h_prev, x_t), axis=1)
h_next = np.tanh(np.matmul(X, self.Whf) + self.bhf)
return h_next
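# Minimal usage sketch (assumed shapes: batch size 8, input dim i=10, hidden
# dim h=15; only the forward direction defined above is exercised):
if __name__ == '__main__':
    np.random.seed(0)
    cell = BidirectionalCell(10, 15, 5)
    h_prev = np.zeros((8, 15))               # previous forward hidden state
    x_t = np.random.randn(8, 10)             # one time step of input data
    print(cell.forward(h_prev, x_t).shape)   # -> (8, 15)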
| [
"[email protected]"
] | |
de4c00c73081f0dda8a034561dd5cb691964b608 | 38515045dbc990087c34a6847c6905020b5849b7 | /micro-benchmark/snippets/lists/slice/main.py | a6f22430847b4ca7ea8f2bb7a928c62f6c6cd7f8 | [
"Apache-2.0"
] | permissive | Tim-eyes/PyCG | 9dbb7867e9f6037869ec6bf1218397d986638836 | ef121c7ffb5ee06f4952fbe209b326bb7bf12647 | refs/heads/master | 2023-08-30T07:54:40.761229 | 2021-07-25T13:08:46 | 2021-07-25T13:08:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | def func1():
pass
def func2():
pass
def func3():
pass
ls = [func1, func2, func3]
ls2 = ls[1:3]
ls2[0]()
| [
"[email protected]"
] | |
579bff385dfa8e145f010cd9a5919a7d939ee332 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/ShopOrderModifyResult.py | 9082302886be32ebbbbaf87862cab8d4893c57bb | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 2,321 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ShopOrderModifyResult(object):
def __init__(self):
self._ext_infos = None
self._result_code = None
self._shop_id = None
self._store_id = None
@property
def ext_infos(self):
return self._ext_infos
@ext_infos.setter
def ext_infos(self, value):
self._ext_infos = value
@property
def result_code(self):
return self._result_code
@result_code.setter
def result_code(self, value):
self._result_code = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def store_id(self):
return self._store_id
@store_id.setter
def store_id(self, value):
self._store_id = value
def to_alipay_dict(self):
params = dict()
if self.ext_infos:
if hasattr(self.ext_infos, 'to_alipay_dict'):
params['ext_infos'] = self.ext_infos.to_alipay_dict()
else:
params['ext_infos'] = self.ext_infos
if self.result_code:
if hasattr(self.result_code, 'to_alipay_dict'):
params['result_code'] = self.result_code.to_alipay_dict()
else:
params['result_code'] = self.result_code
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.store_id:
if hasattr(self.store_id, 'to_alipay_dict'):
params['store_id'] = self.store_id.to_alipay_dict()
else:
params['store_id'] = self.store_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ShopOrderModifyResult()
if 'ext_infos' in d:
o.ext_infos = d['ext_infos']
if 'result_code' in d:
o.result_code = d['result_code']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'store_id' in d:
o.store_id = d['store_id']
return o
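# Round-trip sketch (illustrative field values, not real Alipay data):
if __name__ == '__main__':
    src = {'result_code': 'SUCCESS', 'shop_id': '2018SH0001', 'store_id': 'ST01'}
    obj = ShopOrderModifyResult.from_alipay_dict(src)
    print(obj.to_alipay_dict())
    # -> {'result_code': 'SUCCESS', 'shop_id': '2018SH0001', 'store_id': 'ST01'}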
| [
"[email protected]"
] | |
78336ba51d32b62181a4ce2bb2a6dbf2229c8c53 | dde7259b842de982208caff054d9c9cb3c18c7be | /debug .rock.py | 71e80dfc08454845c7b70946650d47577162cdad | [] | no_license | pooja-pichad/more_excersise | d1137ab4b757e68bf8109405d7a8cf47e9a0bbfd | f9f94b04ad533a5a5ca983d33775b94af8bd146e | refs/heads/main | 2023-06-18T22:21:03.690142 | 2021-07-20T06:56:34 | 2021-07-20T06:56:34 | 387,700,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | from random import randint
def win():
print ('You win!')
def lose():
print ('You lose!')
while True:
    player_choice = input('What do you pick? (rock, paper, scissors)').strip()
random_move = randint(0, 2)
moves = ['rock', 'paper', 'scissors']
computer_choice = moves[random_move]
print(computer_choice)
if player_choice == computer_choice:
print ('Draw!')
    elif player_choice == 'rock' and computer_choice == 'scissors':
        win()
    elif player_choice == 'paper' and computer_choice == 'scissors':
        lose()
    elif player_choice == 'scissors' and computer_choice == 'paper':
        win()
    elif player_choice == 'scissors' and computer_choice == 'rock':
        lose()
    elif player_choice == 'paper' and computer_choice == 'rock':
        win()
    elif player_choice == 'rock' and computer_choice == 'paper':
        lose()
aGain = input('Do you want to play again? (y or n)').strip()
if aGain == 'n':
break
# import random
# while True:
# user_action = input("Enter a choice (rock, paper, scissors): ")
# possible_actions = ["rock", "paper", "scissors"]
# computer_action = random.choice(possible_actions)
# print(f"\nYou chose {user_action}, computer chose {computer_action}.\n")
# if user_action == computer_action:
# print(f"Both players selected {user_action}. It's a tie!")
# elif user_action == "rock":
# if computer_action == "scissors":
# print("Rock smashes scissors! You win!")
# else:
# print("Paper covers rock! You lose.")
# elif user_action == "paper":
# if computer_action == "rock":
# print("Paper covers rock! You win!")
# else:
# print("Scissors cuts paper! You lose.")
# elif user_action == "scissors":
# if computer_action == "paper":
# print("Scissors cuts paper! You win!")
# else:
# print("Rock smashes scissors! You lose.")
# play_again = input("Play again? (y/n): ")
# if play_again.lower() != "y":
# break
| [
"[email protected]"
] | |
b1ae0c007b11135e8f16664aa1a43c6d13c5c89c | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/9276216/snippet.py | 27e3f6d7e286538a706d09b067ed2b79a247d7b7 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 1,494 | py | #!/usr/bin/python
''' Python command line argument example using argparse module
Example output:
./parser.py --server=pyserver --port=8080,443,25,22,21 --keyword=pyisgood
Server name: [ pyserver ]
Port: [ 8080 ]
Port: [ 443 ]
Port: [ 25 ]
Port: [ 22 ]
Port: [ 21 ]
Keyword assigned: [ pyisgood ]
'''
import argparse
__author__ = 'Jason Vasquez Orona'
def get_args():
'''This function parses and return arguments passed in'''
# Assign description to the help doc
parser = argparse.ArgumentParser(
description='Script retrieves schedules from a given server')
# Add arguments
parser.add_argument(
'-s', '--server', type=str, help='Server name', required=True)
parser.add_argument(
'-p', '--port', type=str, help='Port number', required=True, nargs='+')
parser.add_argument(
'-k', '--keyword', type=str, help='Keyword search', required=False, default=None)
# Array for all arguments passed to script
args = parser.parse_args()
# Assign args to variables
server = args.server
port = args.port[0].split(",")
keyword = args.keyword
# Return all variable values
return server, port, keyword
# Run get_args()
# get_args()
# Match return values from get_arguments()
# and assign to their respective variables
server, port, keyword = get_args()
# Print the values
print "\nServer name: [ %s ]\n" % server
for p in port:
print "Port: [ %s ]" % p
print "\nKeyword assigned: [ %s ]\n" % keyword
| [
"[email protected]"
] | |
746f58db6baa85c1093e025491da54c87739e1c6 | dd6c5f31a2a14f610bd9ae7ebffa37586cfdf6fa | /jingfen_app/manage.py | 7a8b7fb2debbfdb45fa14d397a32c43df9c8696f | [] | no_license | wean/jingfen | 534945e8fe12686e25a3fd9788b29bca2ba49be1 | ed9cc88f57b83c5c77ff85fab58ddf5094f7793f | refs/heads/master | 2020-04-05T07:11:46.721676 | 2018-04-10T05:21:33 | 2018-04-10T05:21:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | #! /usr/bin/python
# -*- coding:utf-8 -*-
# @Time : 2018/3/12 9:21 PM
# from jingfen.jingfen_app import create_app, db
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
    return 'hello world!'
if __name__ == '__main__':
app.run(debug=True)
| [
"[email protected]"
] | |
6df0cb04a29969a2e6fb06c94560c3eb5217a2e8 | 5e22728a45dc131b5abcdde3c10928557177898b | /tests/nb_test.py | 0634d44b1e6eb8abdd7c5d0d810154d9280e95fc | [
"MIT"
] | permissive | microsoft/msticnb | 74fc9636964be68900702ee0c85b0c992f0779ad | cefc4ee5a22285d33e7abd91371c617fe42f8129 | refs/heads/main | 2023-06-30T02:00:29.253130 | 2023-03-16T20:14:27 | 2023-03-16T20:14:27 | 250,417,186 | 25 | 11 | MIT | 2023-03-16T20:14:28 | 2020-03-27T02:00:17 | Python | UTF-8 | Python | false | false | 3,942 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Notebooklet for Host Summary."""
from typing import Any, Dict, Iterable, Optional
import pandas as pd
from msticpy.common.timespan import TimeSpan
from msticpy.datamodel import entities
from msticnb import nb_metadata
from msticnb._version import VERSION
from msticnb.common import nb_print, set_text
from msticnb.notebooklet import NBMetadata, Notebooklet, NotebookletResult
__version__ = VERSION
__author__ = "Ian Hellen"
_CLS_METADATA: NBMetadata
_CELL_DOCS: Dict[str, Any]
_CLS_METADATA, _CELL_DOCS = nb_metadata.read_mod_metadata(__file__, __name__)
# pylint: disable=too-few-public-methods
class TstSummaryResult(NotebookletResult):
"""Test Results."""
def __init__(
self,
description: Optional[str] = None,
timespan: Optional[TimeSpan] = None,
notebooklet: Optional["Notebooklet"] = None,
):
"""
Create new Notebooklet result instance.
Parameters
----------
description : Optional[str], optional
Result description, by default None
timespan : Optional[TimeSpan], optional
TimeSpan for the results, by default None
notebooklet : Optional[, optional
Originating notebooklet, by default None
"""
super().__init__(description, timespan, notebooklet)
self.host_entity: entities.Host = None
self.related_alerts: pd.DataFrame = None
self.related_bookmarks: pd.DataFrame = None
self.default_property: pd.DataFrame = None
self.optional_property: pd.DataFrame = None
# pylint: disable=too-few-public-methods
class TstNBSummary(Notebooklet):
"""Test Notebooklet class."""
metadata = _CLS_METADATA
__doc__ = nb_metadata.update_class_doc(__doc__, metadata)
_cell_docs = _CELL_DOCS
# pylint: disable=too-many-branches
@set_text(docs=_CELL_DOCS, key="run") # noqa MC0001
def run(
self,
value: Any = None,
data: Optional[pd.DataFrame] = None,
timespan: Optional[TimeSpan] = None,
options: Optional[Iterable[str]] = None,
**kwargs,
) -> TstSummaryResult:
"""Return host summary data."""
super().run(
value=value, data=data, timespan=timespan, options=options, **kwargs
)
# pylint: disable=attribute-defined-outside-init
self._last_result = TstSummaryResult(
notebooklet=self, description=self.metadata.description, timespan=timespan
)
host_entity = entities.Host(HostName="testhost")
_test_inline_text(host_entity)
_test_yaml_text(host_entity)
self._last_result.host_entity = host_entity
self._last_result.related_alerts = pd.DataFrame()
self._last_result.related_bookmarks = pd.DataFrame()
if "default_opt" in self.options:
self._last_result.default_property = pd.DataFrame()
if "optional_opt" in self.options:
self._last_result.optional_property = pd.DataFrame()
return self._last_result
# %%
# Get IP Information from Heartbeat
@set_text(
title="Host Entity details",
hd_level=3,
text="""
These are the host entity details gathered from Heartbeat
and, if applicable, AzureNetworkAnalytics and Azure management
API.
The data shows OS information, IP Addresses assigned the
host and any Azure VM information available.
""",
md=True,
)
def _test_inline_text(host_entity):
nb_print("TestInline")
nb_print(host_entity)
@set_text(docs=_CELL_DOCS, key="show_host_entity")
def _test_yaml_text(host_entity):
nb_print("TestYaml")
nb_print(host_entity)
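# Usage sketch (hypothetical; assumes msticnb.init() has already attached a
# query provider, as the framework normally requires before running notebooklets):
#
#   nb = TstNBSummary()
#   res = nb.run(value="testhost", timespan=TimeSpan(period="1d"))
#   res.host_entity          # populated entities.Host
#   res.related_alerts       # empty DataFrame in this test notebooklet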
| [
"[email protected]"
] | |
41fc9b8079910692b8adde95dc4604182ba9dbc7 | 58ade65dffc7cbe103d93d7c769096a20d9f9815 | /src/smach_based_introspection_framework/offline_part/anomaly_classification_feature_selection/msg_filters_with_scaling_and_clip.py | a375c305a1c8e01a004943519b8b7a4c2b4adc1a | [
"BSD-3-Clause"
] | permissive | birlrobotics/smach_based_introspection_framework | 2cff69ecec030a5b5046dea99f9e15105f52361b | f16742339cddfc86effba4dbf6e5062304704b89 | refs/heads/master | 2021-05-09T12:02:26.946473 | 2019-05-29T02:46:47 | 2019-05-29T02:46:47 | 119,001,821 | 7 | 1 | null | 2018-07-05T04:58:40 | 2018-01-26T03:37:58 | Python | UTF-8 | Python | false | false | 634 | py | from rostopics_to_timeseries import TopicMsgFilter
import numpy as np
class TactileStaticStdScaleClipMaxFilter(TopicMsgFilter):
def __init__(self):
super(TactileStaticStdScaleClipMaxFilter, self).__init__()
def convert(self, msg):
ret = np.array([
np.std(msg.taxels[0].values),
np.std(msg.taxels[1].values),
])
return [np.clip(ret/60.0, -1, 1).max()]
@staticmethod
def vector_size():
return 1
@staticmethod
def vector_meaning():
return [
'tactile_static_data.left.std.clip(ret/60.0, -1, 1).max()', \
]
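# Standalone sketch with a stubbed tactile message (two taxel arrays), assuming
# TopicMsgFilter's no-argument constructor is usable outside ROS:
if __name__ == '__main__':
    class _Stub(object):
        def __init__(self, **kw):
            self.__dict__.update(kw)
    msg = _Stub(taxels=[_Stub(values=[0, 30, 60]), _Stub(values=[5, 5, 5])])
    f = TactileStaticStdScaleClipMaxFilter()
    print(f.convert(msg))  # std([0,30,60])/60 ~= 0.408 beats std([5,5,5])/60 = 0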
| [
"[email protected]"
] | |
724e03976eb11b0ba1df23167f2f294516dc6dad | a6719f4815ff41d3a1f09e9a63a64c4582d03702 | /error_handling/try-except.py | 881cf6cfddef1e2ef9f597573159206f16b57db8 | [
"MIT"
] | permissive | thanh-vt/python-basic-programming | 8136007b8435dae6339ae33015fe536e21b19d1d | 5fe817986fbef2649b4b03955f07b59d2a2035d8 | refs/heads/main | 2023-01-30T12:57:36.819687 | 2020-12-13T17:27:05 | 2020-12-13T17:27:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | try:
print(x)
except NameError:
print("Variable x is not defined")
except Exception as ex:
print(ex.__cause__)
print("An exception occurred")
try:
print("Hello")
except Exception as ex:
print(ex)
print("Something went wrong")
else:
print("Nothing went wrong")
| [
"[email protected]"
] | |
5ed01a03bd11edf9b7d88470801ecb14f19ac080 | a1119965e2e3bdc40126fd92f4b4b8ee7016dfca | /trunk/repy/tests/ut_repytests_veryslownetsend-testsend.py | 29cb9e30a07108c0c3fa576534605471fbacbae1 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | SeattleTestbed/attic | 0e33211ddf39efdbcf5573d4fc7fa5201aa7310d | f618a962ce2fd3c4838564e8c62c10924f5df45f | refs/heads/master | 2021-06-10T23:10:47.792847 | 2017-05-15T12:05:43 | 2017-05-15T12:05:43 | 20,154,061 | 0 | 1 | null | 2014-10-16T17:21:06 | 2014-05-25T12:34:00 | Python | UTF-8 | Python | false | false | 331 | py | #pragma repy restrictions.veryslownetsend
def foo(ip,port,mess, ch):
stopcomm(ch)
exitall()
if callfunc == 'initialize':
ip = getmyip()
recvmess(ip,<messport>,foo)
sleep(.1)
sendmess(ip,<messport>,'hi')
sendmess(ip,<messport>,'Hello, this is too long of a message to be received in such a short time')
print "hi"
| [
"USER@DOMAIN"
] | USER@DOMAIN |
715954a2482e5085e098e307bb78aba19ebfadb5 | 38797130eaa7f4eb24ba54d5785820b6d0c40672 | /axsemantics/resources.py | 92f6310fbb3dbc4e9711da515f493a71b26d0e80 | [
"MIT"
] | permissive | niklaskks/axsemantics-python | 92cf438fc3250da68e605efd782935109e84994c | 5f80ec8e91be040a7fc409b44f321f666a351396 | refs/heads/master | 2021-01-19T18:01:02.925525 | 2016-05-23T09:15:36 | 2016-05-23T09:15:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,143 | py | from axsemantics import constants
from axsemantics.base import (
APIResource,
ListResource,
)
from axsemantics.mixins import(
ContentGenerationMixin,
CreateableMixin,
DeleteableMixin,
ListableMixin,
UpdateableMixin,
)
from axsemantics.utils import create_object
class ThingList(ListResource):
class_name = 'thing'
def __init__(self, cp_id, *args, **kwargs):
self.cp_id = cp_id
super(ThingList, self).__init__(*args, **kwargs)
def __next__(self):
if self.current_index >= len(self.current_list):
if self.next_page:
self._update()
else:
raise StopIteration
self.current_index += 1
return create_object(self.current_list[self.current_index - 1], api_token=self.api_token, _type=self.class_name, cp_id=self.cp_id)
class Thing(CreateableMixin, UpdateableMixin, DeleteableMixin, ListableMixin, ContentGenerationMixin, APIResource):
class_name = 'thing'
required_fields = ['uid', 'name', 'content_project']
list_class = ThingList
def __init__(self, cp_id=None, **kwargs):
super(Thing, self).__init__(**kwargs)
self['content_project'] = cp_id
def instance_url(self):
url = '/{}/content-project/{}/thing/'.format(
constants.API_VERSION,
self['content_project'],
)
if self['id']:
url += '{}/'.format(self['id'])
return url
class ContentProject(CreateableMixin, DeleteableMixin, ListableMixin, ContentGenerationMixin, APIResource):
class_name = 'content-project'
required_fields = ['name', 'engine_configuration']
def __init__(self, api_token=None, **kwargs):
super(ContentProject, self).__init__(api_token=api_token, **kwargs)
def things(self):
if self['id']:
thing_url = '{}thing/'.format(self.instance_url())
return ThingList(cp_id=self['id'], api_token=self.api_token, class_name=self.class_name, initial_url=thing_url)
class ContentProjectList(ListResource):
initial_url = ContentProject.class_url()
class_name = 'content-project'
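# Usage sketch (hypothetical token and IDs; these calls would hit the live
# AX Semantics API, so this is a comment-only illustration):
#
#   cp = ContentProject(api_token='...', id=42, name='demo',
#                       engine_configuration=1)
#   for thing in cp.things():      # lazily pages through the ThingList
#       print(thing['uid'])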
| [
"[email protected]"
] | |
f7a79c683a39a157ca334486bf2720da61880c5f | f0fa96d39a66c3ddaae4266442a13ec3feb7a462 | /binary_search/ceil_ele_sorted_arr.py | b38b7525c50fb69b6e0caad422b862b18198fcdd | [] | no_license | ashishgupta2014/problem_solving_practices | 14d587e98d9996a95efe822335ca4baccb39b1a1 | bc4f4b07e1e33273010e34428e0c31d2d6656c14 | refs/heads/master | 2023-04-26T03:47:40.766508 | 2021-06-07T04:55:52 | 2021-06-07T04:55:52 | 298,063,915 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,506 | py | def binary_search_itr(arr, x):
"""
https://www.geeksforgeeks.org/ceiling-in-a-sorted-array/
Given a sorted array and a value x, the ceiling of x is the smallest element in array greater than or equal to x,
and the floor is the greatest element smaller than or equal to x. Assume than the array is sorted in non-decreasing
order. Write efficient functions to find floor and ceiling of x.
Examples :
For example, let the input array be {1, 2, 8, 10, 10, 12, 19}
For x = 0: floor doesn't exist in array, ceil = 1
For x = 1: floor = 1, ceil = 1
For x = 5: floor = 2, ceil = 8
For x = 20: floor = 19, ceil doesn't exist in array
In below methods, we have implemented only ceiling search functions. Floor search can be implemented in the same way.
Method 1 (Linear Search)
Algorithm to search ceiling of x:
1) If x is smaller than or equal to the first element in array then return 0(index of first element)
2) Else Linearly search for an index i such that x lies between arr[i] and arr[i+1].
3) If we do not find an index i in step 2, then return -1
:param arr:
:param x:
:return:
"""
l = 0
h = len(arr) - 1
res = -1
while l <= h:
m = l + (h - l // 2)
if arr[m] == x:
return arr[m]
elif arr[m] > x:
res = arr[m]
h = m - 1
else:
l = m + 1
return res
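# Floor search is symmetric, as noted in the docstring: keep arr[m] as the
# candidate when it is *smaller* than x and continue in the right half.
def floor_binary_search(arr, x):
    l = 0
    h = len(arr) - 1
    res = -1
    while l <= h:
        m = l + (h - l) // 2
        if arr[m] == x:
            return arr[m]
        elif arr[m] < x:
            res = arr[m]
            l = m + 1
        else:
            h = m - 1
    return res
print(floor_binary_search([1, 2, 8, 10, 10, 12, 19], 5))  # -> 2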
print(binary_search_itr([1, 2, 8, 10, 10, 12, 19], 5)) | [
"[email protected]"
] | |
f2db1d747f336b5f33ca131fd532125e465c57d1 | 12f83344cdfe561db39ad9106dbf263ccd919f7e | /Projects/miami_metro/social_discovery/pipeline_constants.py | f6e86c7a8ce6044eaf24b5533b5971606e998a59 | [] | no_license | TopWebGhost/Angular-Influencer | ebcd28f83a77a92d240c41f11d82927b98bcea9e | 2f15c4ddd8bbb112c407d222ae48746b626c674f | refs/heads/master | 2021-01-19T10:45:47.039673 | 2016-12-05T01:59:26 | 2016-12-05T01:59:26 | 82,214,998 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,344 | py | # QUEUES for Pipeline modules categories
CREATORS_QUEUE_NAME = 'profiles_pipeline_creators'
CLASSIFIERS_QUEUE_NAME = 'profiles_pipeline_classifiers'
PROCESSORS_QUEUE_NAME = 'profiles_pipeline_processors'
UPGRADERS_QUEUE_NAME = 'profiles_pipeline_upgraders'
CONNECT_PROFILES_QUEUE_NAME = 'profiles_pipeline_connect_to_influencers'
# Queues for that youtube-link in profiles tasks.
YOUTUBE_CREATORS_QUEUE_NAME = 'profiles_pipeline_creators_youtube'
YOUTUBE_CLASSIFIERS_QUEUE_NAME = 'profiles_pipeline_classifiers_youtube'
YOUTUBE_PROCESSORS_QUEUE_NAME = 'profiles_pipeline_processors_youtube'
YOUTUBE_UPGRADERS_QUEUE_NAME = 'profiles_pipeline_upgraders_youtube'
YOUTUBE_PIPELINE_QUEUE_NAME = 'social_profiles_pipeline_youtube'
QUEUE_TO_REFETCH_PROFILES = 'social_profiles_refetch_queue'
# name of queue for pipelines' tasks (obsolete?)
PIPELINE_QUEUE_NAME = 'social_profiles_pipeline'
# for different types of reprocess logic
REPROCESS_PROFILES_QUEUE_NAME = 'reprocess_profiles'
# This is a value of minimum friends count of profile. Profiles with lesser friends will be skipped automatically.
# Default value is 1000
MINIMUM_FRIENDS_COUNT = 1000
def get_queue_name_by_pipeline_step(klassname=None):
"""
returns queue name for particular step of pipeline (simply according to naming)
:param klassname: name of pipeline's step
:return: name of queue to put task in
"""
if isinstance(klassname, str):
klassname = klassname.lower()
        if 'haveyoutube' in klassname:
if 'creator' in klassname:
return YOUTUBE_CREATORS_QUEUE_NAME
elif 'classifier' in klassname:
return YOUTUBE_CLASSIFIERS_QUEUE_NAME
elif 'processor' in klassname:
return YOUTUBE_PROCESSORS_QUEUE_NAME
elif 'upgrader' in klassname:
return YOUTUBE_UPGRADERS_QUEUE_NAME
else:
return YOUTUBE_PIPELINE_QUEUE_NAME
if 'creator' in klassname:
return CREATORS_QUEUE_NAME
elif 'classifier' in klassname:
return CLASSIFIERS_QUEUE_NAME
elif 'processor' in klassname:
return PROCESSORS_QUEUE_NAME
elif 'upgrader' in klassname:
return UPGRADERS_QUEUE_NAME
else:
return PIPELINE_QUEUE_NAME
return None
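# Illustrative routing (hypothetical step names, chosen to exercise the
# keyword matching above):
if __name__ == '__main__':
    print(get_queue_name_by_pipeline_step('InstagramProfileClassifier'))
    # -> 'profiles_pipeline_classifiers'
    print(get_queue_name_by_pipeline_step('HaveYoutubeProcessor'))
    # -> 'profiles_pipeline_processors_youtube'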
| [
"[email protected]"
] | |
63c74473adda43589e07e31deb7538094f80aea3 | 615e9d142587c965d4f593ce68cae1811824026d | /22-functions/javoblar-22-02.py | 5232d042489e863a3b22af62d546f0f804cd2c91 | [] | no_license | XurshidbekDavronov/python-darslar | 0100bb8ea61c355949e81d1d3f3b923befeb80c9 | 4fcf9a3e0c2facdedaed9b53ef806cdc0095fd9d | refs/heads/main | 2023-06-21T03:33:19.509225 | 2021-07-13T13:04:56 | 2021-07-13T13:04:56 | 377,176,205 | 1 | 0 | null | 2021-06-15T13:40:33 | 2021-06-15T13:40:32 | null | UTF-8 | Python | false | false | 324 | py | """
24/12/2020
Programming fundamentals
Lesson 22: *args and **kwargs
Author: Anvar Narzullaev
Website: https://python.sariq.dev
"""
def talaba_info(ism, familiya, **kwargs):
    kwargs['ism'] = ism
    kwargs['familiya'] = familiya
    return kwargs
talaba = talaba_info('olim','olimov',tyil=1995,fakultet='IT',yonalish='AT') | [
"[email protected]"
] | |
739ba7fb3598067a85c67fefdb82248cd9e6b11f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02410/s505213341.py | 11027f146f2932df5fc0f7919c70fcbe05860869 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | n,m=map(int,input().split())
A = [list(map(int, input().split())) for i in range(n)]  # n rows of the n x m matrix A
B = [int(input()) for i in range(m)]                     # the m-vector b
# Print the matrix-vector product A.b, one row dot product per line.
for i in range(n):
    kotae = 0  # running dot product of row i with B ("kotae" = "answer")
    for j in range(m):
        kotae += A[i][j] * B[j]
    print(kotae)
| [
"[email protected]"
] | |
a906220bf4be30eeb1f4fd9ee66dd0cae473407c | 03ff89c04cd325d3f7b4266c59e39011f5b466ba | /ogs5_transectplot/venv/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.py | d95fde26cc37f807aebc37a870c6b56dde666ca6 | [] | no_license | timohouben/python_scripts | dabd35a7778fc459beddd5286141d405af32be1c | 15c622645725560c6450cd31ff194aa95394bfc9 | refs/heads/master | 2021-02-03T21:14:29.916525 | 2020-02-27T15:02:04 | 2020-02-27T15:02:04 | 243,537,745 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236,265 | py | # module pyparsing.py
#
# Copyright (c) 2003-2016 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "2.1.10"
__versionTime__ = "07 Oct 2016 01:31 UTC"
__author__ = "Paul McGuire <[email protected]>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
# ~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
"And",
"CaselessKeyword",
"CaselessLiteral",
"CharsNotIn",
"Combine",
"Dict",
"Each",
"Empty",
"FollowedBy",
"Forward",
"GoToColumn",
"Group",
"Keyword",
"LineEnd",
"LineStart",
"Literal",
"MatchFirst",
"NoMatch",
"NotAny",
"OneOrMore",
"OnlyOnce",
"Optional",
"Or",
"ParseBaseException",
"ParseElementEnhance",
"ParseException",
"ParseExpression",
"ParseFatalException",
"ParseResults",
"ParseSyntaxException",
"ParserElement",
"QuotedString",
"RecursiveGrammarException",
"Regex",
"SkipTo",
"StringEnd",
"StringStart",
"Suppress",
"Token",
"TokenConverter",
"White",
"Word",
"WordEnd",
"WordStart",
"ZeroOrMore",
"alphanums",
"alphas",
"alphas8bit",
"anyCloseTag",
"anyOpenTag",
"cStyleComment",
"col",
"commaSeparatedList",
"commonHTMLEntity",
"countedArray",
"cppStyleComment",
"dblQuotedString",
"dblSlashComment",
"delimitedList",
"dictOf",
"downcaseTokens",
"empty",
"hexnums",
"htmlComment",
"javaStyleComment",
"line",
"lineEnd",
"lineStart",
"lineno",
"makeHTMLTags",
"makeXMLTags",
"matchOnlyAtCol",
"matchPreviousExpr",
"matchPreviousLiteral",
"nestedExpr",
"nullDebugAction",
"nums",
"oneOf",
"opAssoc",
"operatorPrecedence",
"printables",
"punc8bit",
"pythonStyleComment",
"quotedString",
"removeQuotes",
"replaceHTMLEntity",
"replaceWith",
"restOfLine",
"sglQuotedString",
"srange",
"stringEnd",
"stringStart",
"traceParseAction",
"unicodeString",
"upcaseTokens",
"withAttribute",
"indentedBlock",
"originalTextFor",
"ungroup",
"infixNotation",
"locatedExpr",
"withClass",
"CloseMatch",
"tokenMap",
"pyparsing_common",
]
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [
sum,
len,
sorted,
reversed,
list,
tuple,
set,
any,
all,
min,
max,
]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj, unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), "xmlcharrefreplace")
xmlcharref = Regex("&#\d+;")
xmlcharref.setParseAction(lambda t: "\\u" + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__, fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = "&><\"'"
to_symbols = ("&" + s + ";" for s in "amp gt lt quot apos".split())
for from_, to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
pass
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(self, pstr, loc=0, msg=None, elem=None):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
self.args = (pstr, loc, msg)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
def __getattr__(self, aname):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if aname == "lineno":
return lineno(self.loc, self.pstr)
elif aname in ("col", "column"):
return col(self.loc, self.pstr)
elif aname == "line":
return line(self.loc, self.pstr)
else:
raise AttributeError(aname)
def __str__(self):
return "%s (at char %d), (line:%d, col:%d)" % (
self.msg,
self.loc,
self.lineno,
self.column,
)
def __repr__(self):
return _ustr(self)
def markInputline(self, markerString=">!<"):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join(
(line_str[:line_column], markerString, line_str[line_column:])
)
return line_str.strip()
def __dir__(self):
return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
"""
Exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
Example::
try:
Word(nums).setName("integer").parseString("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.col))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like L{ParseFatalException}, but thrown internally when an
L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
immediately because an unbacktrackable syntax error has been found"""
pass
# ~ class ReparseException(ParseBaseException):
# ~ """Experimental class - parse actions can raise this exception to cause
# ~ pyparsing to reparse the input string:
# ~ - with a modified input string, and/or
# ~ - with a modified start location
# ~ Set the values of the ReparseException in the constructor, and raise the
# ~ exception in a parse action to cause pyparsing to use the new string/location.
# ~ Setting the values as None causes no change to be made.
# ~ """
# ~ def __init_( self, newstring, restartLoc ):
# ~ self.newParseText = newstring
# ~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
def __init__(self, parseElementList):
self.parseElementTrace = parseElementList
def __str__(self):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self, p1, p2):
self.tup = (p1, p2)
def __getitem__(self, i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self, i):
self.tup = (self.tup[0], i)
class ParseResults(object):
"""
Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
Example::
integer = Word(nums)
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
# date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
# parseString returns a ParseResults object
result = date_str.parseString("1999/12/31")
def test(s, fn=repr):
print("%s -> %s" % (s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: 31
- month: 12
- year: 1999
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(
self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name, int):
name = _ustr(
name
) # will always return a str, but use _ustr for consistency
self.__name = name
if not (
isinstance(toklist, (type(None), basestring, list))
and toklist in (None, "", [])
):
if isinstance(toklist, basestring):
toklist = [toklist]
if asList:
if isinstance(toklist, ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(), 0)
else:
self[name] = _ParseResultsWithOffset(
ParseResults(toklist[0]), 0
)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError, TypeError, IndexError):
self[name] = toklist
def __getitem__(self, i):
if isinstance(i, (int, slice)):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([v[0] for v in self.__tokdict[i]])
def __setitem__(self, k, v, isinstance=isinstance):
if isinstance(v, _ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k, list()) + [v]
sub = v[0]
elif isinstance(k, (int, slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k, list()) + [
_ParseResultsWithOffset(v, 0)
]
sub = v
if isinstance(sub, ParseResults):
sub.__parent = wkref(self)
def __delitem__(self, i):
if isinstance(i, (int, slice)):
mylen = len(self.__toklist)
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i + 1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name, occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(
value, position - (position > j)
)
else:
del self.__tokdict[i]
def __contains__(self, k):
return k in self.__tokdict
def __len__(self):
return len(self.__toklist)
def __bool__(self):
return not not self.__toklist
__nonzero__ = __bool__
def __iter__(self):
return iter(self.__toklist)
def __reversed__(self):
return iter(self.__toklist[::-1])
def _iterkeys(self):
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def _itervalues(self):
return (self[k] for k in self._iterkeys())
def _iteritems(self):
return ((k, self[k]) for k in self._iterkeys())
if PY_3:
keys = _iterkeys
"""Returns an iterator of all named result keys (Python 3.x only)."""
values = _itervalues
"""Returns an iterator of all named result values (Python 3.x only)."""
items = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 3.x only)."""
else:
iterkeys = _iterkeys
"""Returns an iterator of all named result keys (Python 2.x only)."""
itervalues = _itervalues
"""Returns an iterator of all named result values (Python 2.x only)."""
iteritems = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 2.x only)."""
def keys(self):
"""Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iterkeys())
def values(self):
"""Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.itervalues())
def items(self):
"""Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iteritems())
def haskeys(self):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop(self, *args, **kwargs):
"""
Removes and returns item at specified index (default=C{last}).
Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
argument or an integer argument, it will use C{list} semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use C{dict}
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in C{dict.pop()}.
Example::
def remove_first(tokens):
tokens.pop(0)
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + OneOrMore(Word(nums))
print(patt.parseString("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.addParseAction(remove_LABEL)
print(patt.parseString("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: AAB
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k, v in kwargs.items():
if k == "default":
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified.
Similar to C{dict.get()}.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return defaultValue
def insert(self, index, insStr):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to C{list.insert()}.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
"""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name, occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(
value, position + (position > index)
)
def append(self, item):
"""
Add single element to end of ParseResults list of elements.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
"""
self.__toklist.append(item)
def extend(self, itemseq):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear(self):
"""
Clear all elements and results names.
"""
del self.__toklist[:]
self.__tokdict.clear()
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            return ""
def __add__(self, other):
ret = self.copy()
ret += other
return ret
def __iadd__(self, other):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a < 0 else a + offset
otheritems = other.__tokdict.items()
otherdictitems = [
(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
for (k, vlist) in otheritems
for v in vlist
]
for k, v in otherdictitems:
self[k] = v
if isinstance(v[0], ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update(other.__accumNames)
return self
def __radd__(self, other):
if isinstance(other, int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__(self):
return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict))
def __str__(self):
return (
"["
+ ", ".join(
_ustr(i) if isinstance(i, ParseResults) else repr(i)
for i in self.__toklist
)
+ "]"
)
def _asStringList(self, sep=""):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance(item, ParseResults):
out += item._asStringList()
else:
out.append(_ustr(item))
return out
def asList(self):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [
res.asList() if isinstance(res, ParseResults) else res
for res in self.__toklist
]
def asDict(self):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
# even though a ParseResults supports dict-like access, sometime you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k, toItem(v)) for k, v in item_fn())
def copy(self):
"""
Returns a new copy of a C{ParseResults} object.
"""
ret = ParseResults(self.__toklist)
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update(self.__accumNames)
ret.__name = self.__name
return ret
def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):
"""
(Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
"""
nl = "\n"
out = []
namedItems = dict(
(v[1], k) for (k, vlist) in self.__tokdict.items() for v in vlist
)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [nl, indent, "<", selfTag, ">"]
for i, res in enumerate(self.__toklist):
if isinstance(res, ParseResults):
if i in namedItems:
out += [
res.asXML(
namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted,
)
]
else:
out += [
res.asXML(
None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted,
)
]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [
nl,
nextLevelIndent,
"<",
resTag,
">",
xmlBodyText,
"</",
resTag,
">",
]
out += [nl, indent, "</", selfTag, ">"]
return "".join(out)
def __lookup(self, sub):
for k, vlist in self.__tokdict.items():
for v, loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (
len(self) == 1
and len(self.__tokdict) == 1
and next(iter(self.__tokdict.values()))[0][1] in (0, -1)
):
return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self, indent="", depth=0, full=True):
"""
Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = "\n"
out.append(indent + _ustr(self.asList()))
if full:
if self.haskeys():
items = sorted((str(k), v) for k, v in self.items())
for k, v in items:
if out:
out.append(NL)
out.append("%s%s- %s: " % (indent, (" " * depth), k))
if isinstance(v, ParseResults):
if v:
out.append(v.dump(indent, depth + 1))
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv, ParseResults) for vv in self):
v = self
for i, vv in enumerate(v):
if isinstance(vv, ParseResults):
out.append(
"\n%s%s[%d]:\n%s%s%s"
% (
indent,
(" " * (depth)),
i,
indent,
(" " * (depth + 1)),
vv.dump(indent, depth + 1),
)
)
else:
out.append(
"\n%s%s[%d]:\n%s%s%s"
% (
indent,
(" " * (depth)),
i,
indent,
(" " * (depth + 1)),
_ustr(vv),
)
)
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return (
self.__toklist,
(
self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name,
),
)
def __setstate__(self, state):
self.__toklist = state[0]
(self.__tokdict, par, inAccumNames, self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
def __dir__(self):
return dir(type(self)) + list(self.keys())
collections.MutableMapping.register(ParseResults)
def col(loc, strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
s = strg
return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
def lineno(loc, strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
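    Example (illustrative)::
        lineno(5, "abc\\ndef")   # -> 2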
"""
return strg.count("\n", 0, loc) + 1
def line(loc, strg):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR + 1 : nextCR]
else:
return strg[lastCR + 1 :]
def _defaultStartDebugAction(instring, loc, expr):
print(
(
"Match "
+ _ustr(expr)
+ " at loc "
+ _ustr(loc)
+ "(%d,%d)" % (lineno(loc, instring), col(loc, instring))
)
)
def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
print("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction(instring, loc, expr, exc):
print("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
# ~ 'decorator to trim function calls to match the arity of the target'
# ~ def _trim_arity(func, maxargs=3):
# ~ if func in singleArgBuiltins:
# ~ return lambda s,l,t: func(t)
# ~ limit = 0
# ~ foundArity = False
# ~ def wrapper(*args):
# ~ nonlocal limit,foundArity
# ~ while 1:
# ~ try:
# ~ ret = func(*args[limit:])
# ~ foundArity = True
# ~ return ret
# ~ except TypeError:
# ~ if limit == maxargs or foundArity:
# ~ raise
# ~ limit += 1
# ~ continue
# ~ return wrapper
# this version is Python 2.x-3.x cross-compatible
"decorator to trim function calls to match the arity of the target"
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s, l, t: func(t)
limit = [0]
foundArity = [False]
# traceback return data structure changed in Py3.5 - normalize back to plain tuples
if system_version[:2] >= (3, 5):
def extract_stack(limit=0):
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3, 5, 0) else -2
frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset]
return [(frame_summary.filename, frame_summary.lineno)]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [(frame_summary.filename, frame_summary.lineno)]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 6
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0] :])
foundArity[0] = True
return ret
except TypeError:
# re-raise TypeErrors if they did not come from our arity testing
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
del tb
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
# copy func name to wrapper for sensible debug output
func_name = "<parse action>"
try:
func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars(chars):
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.setDefaultWhitespaceChars(" \t")
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inlineLiteralsUsing(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
def __init__(self, savelist=False):
self.parseAction = list()
self.failAction = None
# ~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
        self.mayIndexError = True  # used to optimize exception handling for subclasses that don't advance parse index
        self.errmsg = ""
        self.modalResults = True  # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = (None, None, None) # custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy(self):
"""
Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of C{expr.copy()} is just C{expr()}::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
"""
cpy = copy.copy(self)
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName(self, name):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self, "exception"):
self.exception.msg = self.errmsg
return self
def setResultsName(self, name, listAllMatches=False):
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
Example::
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches = True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self, breakFlag=True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
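        Example (illustrative sketch)::
            integer = Word(nums).setBreak()
            integer.parseString("123")   # pdb prompt opens just before the match is attempted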
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod(instring, loc, doActions, callPreParse)
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse, "_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction(self, *fns, **kwargs):
"""
Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Optional keyword arguments:
- callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
Example::
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# use parse action to convert to ints at parse time
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_str = integer + '/' + integer + '/' + integer
# note that integer fields are now ints, not strings
date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction(self, *fns, **kwargs):
"""
Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
See examples in L{I{copy}<copy>}.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
functions passed to C{addCondition} need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
msg = kwargs.get("message", "failed user-defined condition")
exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
for fn in fns:
def pa(s, l, t):
if not bool(_trim_arity(fn)(s, l, t)):
raise exc_type(s, l, msg)
self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction(self, fn):
"""Define action to perform if parsing fails at this expression.
        Fail action fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
        if it is desired to stop parsing immediately.
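        Example (a minimal sketch; C{report_failure} is a hypothetical callback)::
            def report_failure(s, loc, expr, err):
                print("failed to match %s at loc %d" % (expr, loc))
            integer = Word(nums).setFailAction(report_failure)
        """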
self.failAction = fn
return self
def _skipIgnorables(self, instring, loc):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc, dummy = e._parse(instring, loc)
exprsFound = True
except ParseException:
pass
return loc
def preParse(self, instring, loc):
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
return loc, []
def postParse(self, instring, loc, tokenlist):
return tokenlist
# ~ @profile
def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):
        debugging = self.debug  # and doActions
if debugging or self.failAction:
# ~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if self.debugActions[0]:
self.debugActions[0](instring, loc, self)
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
try:
try:
loc, tokens = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
except ParseBaseException as err:
# ~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2](instring, tokensStart, self, err)
if self.failAction:
self.failAction(instring, tokensStart, self, err)
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
loc, tokens = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, preloc, doActions)
tokens = self.postParse(instring, loc, tokens)
retTokens = ParseResults(
tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults
)
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn(instring, tokensStart, retTokens)
if tokens is not None:
retTokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList
and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults,
)
except ParseBaseException as err:
# ~ print "Exception raised in user parse action:", err
if self.debugActions[2]:
self.debugActions[2](instring, tokensStart, self, err)
raise
else:
for fn in self.parseAction:
tokens = fn(instring, tokensStart, retTokens)
if tokens is not None:
retTokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList
and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults,
)
if debugging:
# ~ print ("Matched",self,"->",retTokens.asList())
if self.debugActions[1]:
self.debugActions[1](instring, tokensStart, loc, self, retTokens)
return loc, retTokens
def tryParse(self, instring, loc):
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
raise ParseException(instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
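        """Non-consuming test - returns True if this expression would match at C{loc}, else False."""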
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
class _UnboundedCache(object):
def __init__(self):
cache = {}
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
if _OrderedDict is not None:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = _OrderedDict()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
if len(cache) > size:
cache.popitem(False)
def clear(self):
cache.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
else:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = {}
key_fifo = collections.deque([], size)
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
if len(cache) > size:
cache.pop(key_fifo.popleft(), None)
key_fifo.append(key)
def clear(self):
cache.clear()
key_fifo.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
# argument cache for optimizing repeated calls when backtracking through recursive expressions
    packrat_cache = {}  # set for real by enablePackrat(); defined here so that resetCache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
HIT, MISS = 0, 1
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return (value[0], value[1].copy())
_parse = _parseNoCache
@staticmethod
def resetCache():
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(
ParserElement.packrat_cache_stats
)
_packratEnabled = False
@staticmethod
def enablePackrat(cache_size_limit=128):
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default=C{128}) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
Example::
import pyparsing
pyparsing.ParserElement.enablePackrat()
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = ParserElement._UnboundedCache()
else:
ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parseString(self, instring, parseAll=False):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
           - explicitly expand the tabs in your input string before calling
C{parseString}
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
# ~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse(instring, 0)
if parseAll:
loc = self.preParse(instring, loc)
se = Empty() + StringEnd()
se._parse(instring, loc)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn(instring, loc)
nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
except ParseException:
loc = preloc + 1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn(instring, loc)
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc + 1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def transformString(self, instring):
"""
Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
Prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t, s, e in self.scanString(instring):
out.append(instring[lastE:s])
if t:
if isinstance(t, ParseResults):
out += t.asList()
elif isinstance(t, list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr, _flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def searchString(self, instring, maxMatches=_MAX_INT):
"""
Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
prints::
['More', 'Iron', 'Lead', 'Gold', 'I']
"""
try:
return ParseResults(
[t for t, s, e in self.scanString(instring, maxMatches)]
)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional C{maxsplit} argument, to limit the number of splits;
        and the optional C{includeSeparators} argument (default=C{False}), indicating
        whether the separating matched text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t, s, e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other):
"""
Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
converts them to L{Literal}s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
Prints::
Hello, World! -> ['Hello', ',', 'World', '!']
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return And([self, other])
def __radd__(self, other):
"""
Implementation of + operator when left operand is not a C{L{ParserElement}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns C{L{And}} with error stop
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return And([self, And._ErrorStop(), other])
def __rsub__(self, other):
"""
Implementation of - operator when left operand is not a C{L{ParserElement}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other - self
def __mul__(self, other):
"""
Implementation of * operator, allows use of C{expr * 3} in place of
        C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other, int):
minElements, optElements = other, 0
elif isinstance(other, tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0], int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self * other[0] + ZeroOrMore(self)
elif isinstance(other[0], int) and isinstance(other[1], int):
minElements, optElements = other
optElements -= minElements
else:
                raise TypeError(
                    "cannot multiply 'ParserElement' and ('%s','%s') objects"
                    % (type(other[0]), type(other[1]))
                )
else:
            raise TypeError(
                "cannot multiply 'ParserElement' and '%s' objects" % type(other)
            )
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError(
"second tuple value must be greater or equal to first tuple value"
)
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if optElements:
def makeOptionalList(n):
if n > 1:
return Optional(self + makeOptionalList(n - 1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self] * minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self] * minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other):
"""
Implementation of | operator - returns C{L{MatchFirst}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return MatchFirst([self, other])
def __ror__(self, other):
"""
Implementation of | operator when left operand is not a C{L{ParserElement}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other | self
def __xor__(self, other):
"""
Implementation of ^ operator - returns C{L{Or}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return Or([self, other])
def __rxor__(self, other):
"""
Implementation of ^ operator when left operand is not a C{L{ParserElement}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other ^ self
def __and__(self, other):
"""
Implementation of & operator - returns C{L{Each}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return Each([self, other])
def __rand__(self, other):
"""
Implementation of & operator when left operand is not a C{L{ParserElement}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other & self
def __invert__(self):
"""
Implementation of ~ operator - returns C{L{NotAny}}
"""
return NotAny(self)
def __call__(self, name=None):
"""
Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress(self):
"""
Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
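        Example (illustrative)::
            csv_pair = Word(nums) + Suppress(',') + Word(nums)
            csv_pair.parseString("1, 2")   # -> ['1', '2'] - the comma is suppressed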
"""
return Suppress(self)
def leaveWhitespace(self):
"""
Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
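        Example (illustrative)::
            Word(alphas).parseString("  abc")                    # -> ['abc']
            Word(alphas).leaveWhitespace().parseString("  abc")  # -> ParseException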
"""
self.skipWhitespace = False
return self
def setWhitespaceChars(self, chars):
"""
        Overrides the default whitespace chars for this expression only
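        Example (illustrative sketch)::
            # skip only spaces and tabs, so newlines become significant for this expression
            w = Word(alphas).setWhitespaceChars(" \\t")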
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs(self):
"""
Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters.
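        Example (illustrative sketch)::
            # keep literal <TAB>s so that reported locations refer to the raw input
            grammar = OneOrMore(Word(printables)).parseWithTabs()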
"""
self.keepTabs = True
return self
def ignore(self, other):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append(Suppress(other.copy()))
return self
def setDebugActions(self, startAction, successAction, exceptionAction):
"""
Enable display of debugging messages while doing pattern matching.
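        Passing C{None} for any action selects the corresponding default action.
        Example (a minimal sketch; C{show_try} is a hypothetical callable)::
            def show_try(instring, loc, expr):
                print("trying %s at loc %d" % (expr, loc))
            wd = Word(alphas).setDebugActions(show_try, None, None)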
"""
self.debugActions = (
startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction,
)
self.debug = True
return self
def setDebug(self, flag=True):
"""
Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable.
Example::
wd = Word(alphas).setName("alphaword")
integer = Word(nums).setName("numword")
term = wd | integer
# turn on debugging for wd
wd.setDebug()
OneOrMore(term).parseString("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using L{setDebugActions}. Prior to attempting
to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
"""
if flag:
self.setDebugActions(
_defaultStartDebugAction,
_defaultSuccessDebugAction,
_defaultExceptionDebugAction,
)
else:
self.debug = False
return self
def __str__(self):
return self.name
def __repr__(self):
return _ustr(self)
def streamline(self):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion(self, parseElementList):
pass
def validate(self, validateTrace=[]):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self.checkRecursion([])
def parseFile(self, file_or_filename, parseAll=False):
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
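        Example (illustrative; C{"data.txt"} is a hypothetical file)::
            results = OneOrMore(Word(alphas)).parseFile("data.txt")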
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r") as f:
file_contents = f.read()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __eq__(self, other):
if isinstance(other, ParserElement):
return self is other or vars(self) == vars(other)
elif isinstance(other, basestring):
return self.matches(other)
else:
return super(ParserElement, self) == other
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self, other):
return self == other
def __rne__(self, other):
return not (self == other)
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
        inline microtests of sub-expressions while building up a larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
self.parseString(_ustr(testString), parseAll=parseAll)
return True
except ParseBaseException:
return False
def runTests(
self,
tests,
parseAll=True,
comment="#",
fullDump=True,
printResults=True,
failureTests=False,
):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- comment - (default=C{'#'}) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default=C{True}) prints test output to stdout
- failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if C{failureTests} is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading 'r'.)
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.rstrip().splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
allResults = []
comments = []
success = True
for t in tests:
            if (comment is not None and comment.matches(t, False)) or (comments and not t):
comments.append(t)
continue
if not t:
continue
out = ["\n".join(comments), t]
comments = []
try:
t = t.replace(r"\n", "\n")
result = self.parseString(t, parseAll=parseAll)
out.append(result.dump(full=fullDump))
success = success and not failureTests
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if "\n" in t:
out.append(line(pe.loc, t))
out.append(" " * (col(pe.loc, t) - 1) + "^" + fatal)
else:
out.append(" " * pe.loc + "^" + fatal)
out.append("FAIL: " + str(pe))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: " + str(exc))
success = success and failureTests
result = exc
if printResults:
if fullDump:
out.append("")
print("\n".join(out))
allResults.append((t, result))
return success, allResults
class Token(ParserElement):
"""
Abstract C{ParserElement} subclass, for defining atomic matching patterns.
"""
def __init__(self):
super(Token, self).__init__(savelist=False)
class Empty(Token):
"""
An empty token, will always match.
"""
def __init__(self):
super(Empty, self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""
A token that will never match.
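    Example (illustrative)::
        NoMatch().parseString("anything")   # -> always raises ParseException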
"""
def __init__(self):
super(NoMatch, self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl(self, instring, loc, doActions=True):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""
Token to exactly match a specified string.
Example::
Literal('blah').parseString('blah') # -> ['blah']
Literal('blah').parseString('blahfooblah') # -> ['blah']
Literal('blah').parseString('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use L{CaselessLiteral}.
For keyword matching (force word break before and after the matched string),
use L{Keyword} or L{CaselessKeyword}.
"""
def __init__(self, matchString):
super(Literal, self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn(
"null string passed to Literal; use Empty() instead",
SyntaxWarning,
stacklevel=2,
)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
# ~ @profile
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] == self.firstMatchChar and (
self.matchLen == 1 or instring.startswith(self.match, loc)
):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""
Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{L{Literal}}:
- C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
- C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
- C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"
- C{caseless} allows case-insensitive matching, default is C{False}.
Example::
Keyword("start").parseString("start") # -> ['start']
Keyword("start").parseString("starting") # -> Exception
For case-insensitive matching, use L{CaselessKeyword}.
"""
DEFAULT_KEYWORD_CHARS = alphanums + "_$"
def __init__(self, matchString, identChars=None, caseless=False):
super(Keyword, self).__init__()
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn(
"null string passed to Keyword; use Empty() instead",
SyntaxWarning,
stacklevel=2,
)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl(self, instring, loc, doActions=True):
if self.caseless:
if (
(instring[loc : loc + self.matchLen].upper() == self.caselessmatch)
and (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen].upper() not in self.identChars
)
and (loc == 0 or instring[loc - 1].upper() not in self.identChars)
):
return loc + self.matchLen, self.match
else:
if (
instring[loc] == self.firstMatchChar
and (self.matchLen == 1 or instring.startswith(self.match, loc))
and (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen] not in self.identChars
)
and (loc == 0 or instring[loc - 1] not in self.identChars)
):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword, self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
@staticmethod
def setDefaultKeywordChars(chars):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
class CaselessLiteral(Literal):
"""
Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
(Contrast with example for L{CaselessKeyword}.)
"""
def __init__(self, matchString):
super(CaselessLiteral, self).__init__(matchString.upper())
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl(self, instring, loc, doActions=True):
if instring[loc : loc + self.matchLen].upper() == self.match:
return loc + self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of L{Keyword}.
Example::
OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
(Contrast with example for L{CaselessLiteral}.)
"""
def __init__(self, matchString, identChars=None):
super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)
def parseImpl(self, instring, loc, doActions=True):
if (instring[loc : loc + self.matchLen].upper() == self.caselessmatch) and (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen].upper() not in self.identChars
):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class CloseMatch(Token):
"""
A variation on L{Literal} which matches "close" matches, that is,
strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
- C{match_string} - string to be matched
- C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
The results from a successful parse will contain the matched text from the input string and the following named results:
- C{mismatches} - a list of the positions within the match_string where mismatches were found
- C{original} - the original match_string used to compare against the input string
If C{mismatches} is an empty list, then the match was an exact match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(self, match_string, maxMismatches=1):
super(CloseMatch, self).__init__()
self.name = match_string
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected %r (with up to %d mismatches)" % (
self.match_string,
self.maxMismatches,
)
self.mayIndexError = False
self.mayReturnEmpty = False
def parseImpl(self, instring, loc, doActions=True):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc, s_m in enumerate(
zip(instring[loc:maxloc], self.match_string)
):
src, mat = s_m
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = match_stringloc + 1
results = ParseResults([instring[start:loc]])
results["original"] = self.match_string
results["mismatches"] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""
Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{excludeChars} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
L{srange} is useful for defining custom character set strings for defining
C{Word} expressions, using range notation from regular expression character sets.
A common mistake is to use C{Word} to match a specific literal string, as in
C{Word("Address")}. Remember that C{Word} uses the string argument to define
I{sets} of matchable characters. This expression would match "Add", "AAA",
"dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
To match an exact literal string, use L{Literal} or L{Keyword}.
pyparsing includes helper strings for building Words:
- L{alphas}
- L{nums}
- L{alphanums}
- L{hexnums}
- L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
- L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
- L{printables} (any non-whitespace character)
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums+'-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, excludeChars=",")
"""
def __init__(
self,
initChars,
bodyChars=None,
min=1,
max=0,
exact=0,
asKeyword=False,
excludeChars=None,
):
super(Word, self).__init__()
if excludeChars:
initChars = "".join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = "".join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars:
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if " " not in self.initCharsOrig + self.bodyCharsOrig and (
min == 1 and max == 0 and exact == 0
):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.initCharsOrig) == 1:
self.reString = "%s[%s]*" % (
re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),
)
else:
self.reString = "[%s][%s]*" % (
_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),
)
if self.asKeyword:
self.reString = r"\b" + self.reString + r"\b"
try:
self.re = re.compile(self.reString)
except Exception:
self.re = None
def parseImpl(self, instring, loc, doActions=True):
if self.re:
result = self.re.match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
        if instring[loc] not in self.initChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min(maxloc, instrlen)
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start > 0 and instring[start - 1] in bodychars) or (
loc < instrlen and instring[loc] in bodychars
):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__(self):
try:
return super(Word, self).__str__()
except Exception:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s) > 4:
return s[:4] + "..."
else:
return s
if self.initCharsOrig != self.bodyCharsOrig:
self.strRepr = "W:(%s,%s)" % (
charsAsStr(self.initCharsOrig),
charsAsStr(self.bodyCharsOrig),
)
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class Regex(Token):
"""
Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
named parse results.
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__(self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex, self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn(
"null string passed to Regex; use Empty() instead",
SyntaxWarning,
stacklevel=2,
)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn(
"invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning,
stacklevel=2,
)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = self.reString = str(pattern)
self.flags = flags
else:
raise ValueError(
"Regex may only be constructed with a string or a compiled RE object"
)
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
result = self.re.match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc, ret
def __str__(self):
try:
return super(Regex, self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=C{None})
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
- multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
- convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
Example::
qs = QuotedString('"')
print(qs.searchString('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', endQuoteChar='}}')
print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', escQuote='""')
print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
def __init__(
self,
quoteChar,
escChar=None,
escQuote=None,
multiline=False,
unquoteResults=True,
endQuoteChar=None,
convertWhitespaceEscapes=True,
):
super(QuotedString, self).__init__()
# remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if not quoteChar:
warnings.warn(
"quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2
)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
warnings.warn(
"endQuoteChar cannot be the empty string",
SyntaxWarning,
stacklevel=2,
)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
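# Descriptive note (added): the code below assembles a single regex of the
# form <quote>(?: body-char | escaped-quote | escChar+any-char )*<endquote>,
# excluding bare end-quote characters (and newlines, unless multiline) from
# the body alternatives.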
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r"%s(?:[^%s%s]" % (
re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or ""),
)
else:
self.flags = 0
self.pattern = r"%s(?:[^%s\n\r%s]" % (
re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or ""),
)
if len(self.endQuoteChar) > 1:
self.pattern += (
"|(?:"
+ ")|(?:".join(
"%s[^%s]"
% (
re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]),
)
for i in range(len(self.endQuoteChar) - 1, 0, -1)
)
+ ")"
)
if escQuote:
self.pattern += r"|(?:%s)" % re.escape(escQuote)
if escChar:
self.pattern += r"|(?:%s.)" % re.escape(escChar)
self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
self.pattern += r")*%s" % re.escape(self.endQuoteChar)
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn(
"invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning,
stacklevel=2,
)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
result = (
instring[loc] == self.firstQuoteChar
and self.re.match(instring, loc)
or None
)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen : -self.endQuoteCharLen]
if isinstance(ret, basestring):
# replace escaped whitespace
if "\\" in ret and self.convertWhitespaceEscapes:
ws_map = {r"\t": "\t", r"\n": "\n", r"\f": "\f", r"\r": "\r"}
for wslit, wschar in ws_map.items():
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__(self):
try:
return super(QuotedString, self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (
self.quoteChar,
self.endQuoteChar,
)
return self.strRepr
class CharsNotIn(Token):
"""
Token for matching words composed of characters I{not} in a given set (will
include whitespace in matched characters if not listed in the provided exclusion set - see example).
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__(self, notChars, min=1, max=0, exact=0):
super(CharsNotIn, self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = self.minLen == 0
self.mayIndexError = False
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min(start + self.maxLen, len(instring))
while loc < maxlen and (instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__(self):
try:
return super(CharsNotIn, self).__str__()
except Exception:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""
Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{L{Word}} class.
"""
whiteStrs = {" ": "<SPC>", "\t": "<TAB>", "\n": "<LF>", "\r": "<CR>", "\f": "<FF>"}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White, self).__init__()
self.matchWhite = ws
self.setWhitespaceChars(
"".join(c for c in self.whiteChars if c not in self.matchWhite)
)
# ~ self.leaveWhitespace()
self.name = "".join(White.whiteStrs[c] for c in self.matchWhite)
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] not in self.matchWhite:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min(maxloc, len(instring))
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
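# Illustrative sketch (added, not part of the original module): White can make
# otherwise-ignored whitespace significant, e.g. a single leading tab in
# tab-indented input parsed with parseWithTabs():
#   tab_indent = White("\t", exact=1)
#   indented_line = tab_indent + restOfLine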
class _PositionToken(Token):
def __init__(self):
super(_PositionToken, self).__init__()
self.name = self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""
Token to advance to a specific column of input text; useful for tabular report scraping.
"""
def __init__(self, colno):
super(GoToColumn, self).__init__()
self.col = colno
def preParse(self, instring, loc):
if col(loc, instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
while (
loc < instrlen
and instring[loc].isspace()
and col(loc, instring) != self.col
):
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
thiscol = col(loc, instring)
if thiscol > self.col:
raise ParseException(instring, loc, "Text not in expected column", self)
newloc = loc + self.col - thiscol
ret = instring[loc:newloc]
return newloc, ret
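# Illustrative sketch (added): GoToColumn is handy for fixed-column report
# scraping, e.g. picking up whatever value starts at column 20 of each line:
#   value_at_20 = GoToColumn(20) + Word(printables)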
class LineStart(_PositionToken):
"""
Matches if current position is at the beginning of a line within the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
print(t)
Prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__(self):
super(LineStart, self).__init__()
self.errmsg = "Expected start of line"
def parseImpl(self, instring, loc, doActions=True):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(_PositionToken):
"""
Matches if current position is at the end of a line within the parse string
"""
def __init__(self):
super(LineEnd, self).__init__()
self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", ""))
self.errmsg = "Expected end of line"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
if instring[loc] == "\n":
return loc + 1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
"""
Matches if current position is at the beginning of the parse string
"""
def __init__(self):
super(StringStart, self).__init__()
self.errmsg = "Expected start of text"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse(instring, 0):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""
Matches if current position is at the end of the parse string
"""
def __init__(self):
super(StringEnd, self).__init__()
self.errmsg = "Expected end of text"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(_PositionToken):
"""
Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars=printables):
super(WordStart, self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
if (
instring[loc - 1] in self.wordChars
or instring[loc] not in self.wordChars
):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""
Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars=printables):
super(WordEnd, self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True):
instrlen = len(instring)
if instrlen > 0 and loc < instrlen:
if (
instring[loc] in self.wordChars
or instring[loc - 1] not in self.wordChars
):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
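# Illustrative sketch (added): WordStart/WordEnd together emulate regex \b
# word boundaries, so 'cat' will not match inside 'concatenate':
#   cat_word = WordStart(alphanums) + Literal("cat") + WordEnd(alphanums)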
class ParseExpression(ParserElement):
"""
Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
"""
def __init__(self, exprs, savelist=False):
super(ParseExpression, self).__init__(savelist)
if isinstance(exprs, _generatorType):
exprs = list(exprs)
if isinstance(exprs, basestring):
self.exprs = [ParserElement._literalStringClass(exprs)]
elif isinstance(exprs, collections.Iterable):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(ParserElement._literalStringClass, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list(exprs)
except TypeError:
self.exprs = [exprs]
self.callPreparse = False
def __getitem__(self, i):
return self.exprs[i]
def append(self, other):
self.exprs.append(other)
self.strRepr = None
return self
def leaveWhitespace(self):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore(self, other):
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super(ParseExpression, self).ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
else:
super(ParseExpression, self).ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
return self
def __str__(self):
try:
return super(ParseExpression, self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs))
return self.strRepr
def streamline(self):
super(ParseExpression, self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if len(self.exprs) == 2:
other = self.exprs[0]
if (
isinstance(other, self.__class__)
and not (other.parseAction)
and other.resultsName is None
and not other.debug
):
self.exprs = other.exprs[:] + [self.exprs[1]]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if (
isinstance(other, self.__class__)
and not (other.parseAction)
and other.resultsName is None
and not other.debug
):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + _ustr(self)
return self
def setResultsName(self, name, listAllMatches=False):
ret = super(ParseExpression, self).setResultsName(name, listAllMatches)
return ret
def validate(self, validateTrace=[]):
tmp = validateTrace[:] + [self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion([])
def copy(self):
ret = super(ParseExpression, self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
May also be constructed using the C{'-'} operator, which will suppress backtracking.
Example::
integer = Word(nums)
name_expr = OneOrMore(Word(alphas))
expr = And([integer("id"),name_expr("name"),integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop, self).__init__(*args, **kwargs)
self.name = "-"
self.leaveWhitespace()
def __init__(self, exprs, savelist=True):
super(And, self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars(self.exprs[0].whiteChars)
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl(self, instring, loc, doActions=True):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse(
instring, loc, doActions, callPreParse=False
)
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse(instring, loc, doActions)
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(
instring, len(instring), self.errmsg, self
)
else:
loc, exprtokens = e._parse(instring, loc, doActions)
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other):
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
return self.append(other) # And( [ self, other ] )
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
if not e.mayReturnEmpty:
break
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
class Or(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs, savelist=False):
super(Or, self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
matches = []
for e in self.exprs:
try:
loc2 = e.tryParse(instring, loc)
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(
instring, len(instring), e.errmsg, self
)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
matches.sort(key=lambda x: -x[0])
for _, e in matches:
try:
return e._parse(instring, loc, doActions)
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(
instring, loc, "no defined alternatives to match", self
)
def __ixor__(self, other):
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
return self.append(other) # Or( [ self, other ] )
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
class MatchFirst(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs, savelist=False):
super(MatchFirst, self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse(instring, loc, doActions)
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(
instring, len(instring), e.errmsg, self
)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(
instring, loc, "no defined alternatives to match", self
)
def __ior__(self, other):
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
return self.append(other) # MatchFirst( [ self, other ] )
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
class Each(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
Example::
color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
shape_spec.runTests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__(self, exprs, savelist=True):
super(Each, self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl(self, instring, loc, doActions=True):
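# Descriptive note (added): on the first parse, partition the contained
# expressions into required, optional, and repeatable (ZeroOrMore/OneOrMore)
# groups; the partition is cached on self and reused on subsequent parses.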
if self.initExprGroups:
self.opt1map = dict(
(id(e.expr), e) for e in self.exprs if isinstance(e, Optional)
)
opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)]
opt2 = [
e
for e in self.exprs
if e.mayReturnEmpty and not isinstance(e, Optional)
]
self.optionals = opt1 + opt2
self.multioptionals = [
e.expr for e in self.exprs if isinstance(e, ZeroOrMore)
]
self.multirequired = [
e.expr for e in self.exprs if isinstance(e, OneOrMore)
]
self.required = [
e
for e in self.exprs
if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))
]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse(instring, tmpLoc)
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e), e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(
instring, loc, "Missing one or more required elements (%s)" % missing
)
# add any unmatched Optionals, in case they have default values defined
matchOrder += [
e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt
]
resultlist = []
for e in matchOrder:
loc, results = e._parse(instring, loc, doActions)
resultlist.append(results)
finalResults = sum(resultlist, ParseResults([]))
return loc, finalResults
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
class ParseElementEnhance(ParserElement):
"""
Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
"""
def __init__(self, expr, savelist=False):
super(ParseElementEnhance, self).__init__(savelist)
if isinstance(expr, basestring):
if issubclass(ParserElement._literalStringClass, Token):
expr = ParserElement._literalStringClass(expr)
else:
expr = ParserElement._literalStringClass(Literal(expr))
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars(expr.whiteChars)
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl(self, instring, loc, doActions=True):
if self.expr is not None:
return self.expr._parse(instring, loc, doActions, callPreParse=False)
else:
raise ParseException("", loc, self.errmsg, self)
def leaveWhitespace(self):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore(self, other):
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super(ParseElementEnhance, self).ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
else:
super(ParseElementEnhance, self).ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
return self
def streamline(self):
super(ParseElementEnhance, self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion(self, parseElementList):
if self in parseElementList:
raise RecursiveGrammarException(parseElementList + [self])
subRecCheckList = parseElementList[:] + [self]
if self.expr is not None:
self.expr.checkRecursion(subRecCheckList)
def validate(self, validateTrace=[]):
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__(self):
try:
return super(ParseElementEnhance, self).__str__()
except Exception:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr))
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""
Lookahead matching of the given parse expression. C{FollowedBy}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__(self, expr):
super(FollowedBy, self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
self.expr.tryParse(instring, loc)
return loc, []
class NotAny(ParseElementEnhance):
"""
Lookahead to disallow matching with the given parse expression. C{NotAny}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression does I{not} match at the current
position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator.
Example::
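# illustrative sketch (added; CaselessKeyword is defined earlier in this module)
AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
# use the '~' operator (NotAny) so keywords are not parsed as identifiers
ident = ~(AND | OR | NOT) + Word(alphas)
boolean_term = Optional(NOT) + ident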
"""
def __init__(self, expr):
super(NotAny, self).__init__(expr)
# ~ self.leaveWhitespace()
# do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.skipWhitespace = False
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, " + _ustr(self.expr)
def parseImpl(self, instring, loc, doActions=True):
if self.expr.canParseNext(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class _MultipleMatch(ParseElementEnhance):
def __init__(self, expr, stopOn=None):
super(_MultipleMatch, self).__init__(expr)
self.saveAsList = True
ender = stopOn
if isinstance(ender, basestring):
ender = ParserElement._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
def parseImpl(self, instring, loc, doActions=True):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False)
try:
hasIgnoreExprs = bool(self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables(instring, loc)
else:
preloc = loc
loc, tmptokens = self_expr_parse(instring, preloc, doActions)
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException, IndexError):
pass
return loc, tokens
class OneOrMore(_MultipleMatch):
"""
Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stopOn attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parseString(text).pprint()
"""
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
class ZeroOrMore(_MultipleMatch):
"""
Optional repetition of zero or more of the given expression.
Parameters:
- expr - expression that must match zero or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example: similar to L{OneOrMore}
"""
def __init__(self, expr, stopOn=None):
super(ZeroOrMore, self).__init__(expr, stopOn=stopOn)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
try:
return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
except (ParseException, IndexError):
return loc, []
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""
Optional matching of the given expression.
Parameters:
- expr - expression that may match zero or one time
- default (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
zip.runTests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
def __init__(self, expr, default=_optionalNotMatched):
super(Optional, self).__init__(expr, savelist=False)
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
try:
loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False)
except (ParseException, IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([self.defaultValue])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [self.defaultValue]
else:
tokens = []
return loc, tokens
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""
Token for skipping over all undefined text until the matched expression is found.
Parameters:
- expr - target expression marking the end of the data to be skipped
- include - (default=C{False}) if True, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element list).
- ignore - (default=C{None}) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- failOn - (default=C{None}) define expressions that are not allowed to be
included in the skipped text; if found before the target expression is found,
the SkipTo is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quotedString)
string_data.setParseAction(tokenMap(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.searchString(report):
print(tkt.dump())
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: 6
- desc: Intermittent system crash
- issue_num: 101
- sev: Critical
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: 14
- desc: Spelling error on Login ('log|n')
- issue_num: 94
- sev: Cosmetic
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: 47
- desc: System slow when running too many reports
- issue_num: 79
- sev: Minor
"""
def __init__(self, other, include=False, ignore=None, failOn=None):
super(SkipTo, self).__init__(other)
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if isinstance(failOn, basestring):
self.failOn = ParserElement._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for " + _ustr(self.expr)
def parseImpl(self, instring, loc, doActions=True):
startloc = loc
instrlen = len(instring)
expr = self.expr
expr_parse = self.expr._parse
self_failOn_canParseNext = (
self.failOn.canParseNext if self.failOn is not None else None
)
self_ignoreExpr_tryParse = (
self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
)
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = expr_parse(instring, loc, doActions, callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""
Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
See L{ParseResults.pprint} for an example of a recursive parser created using
C{Forward}.
"""
def __init__(self, other=None):
super(Forward, self).__init__(other, savelist=False)
def __lshift__(self, other):
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars(self.expr.whiteChars)
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace(self):
self.skipWhitespace = False
return self
def streamline(self):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate(self, validateTrace=[]):
if self not in validateTrace:
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__(self):
if hasattr(self, "name"):
return self.name
return self.__class__.__name__ + ": ..."
# stubbed out for now - creates awful memory and perf issues
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward, self).copy()
else:
ret = Forward()
ret <<= self
return ret
class _ForwardNoRecurse(Forward):
def __str__(self):
return "..."
class TokenConverter(ParseElementEnhance):
"""
Abstract subclass of C{ParseExpression}, for converting parsed results.
"""
def __init__(self, expr, savelist=False):
super(TokenConverter, self).__init__(expr) # , savelist )
self.saveAsList = False
class Combine(TokenConverter):
"""
Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parseString('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parseString('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__(self, expr, joinString="", adjacent=True):
super(Combine, self).__init__(expr)
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore(self, other):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super(Combine, self).ignore(other)
return self
def postParse(self, instring, loc, tokenlist):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults(
["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
)
if self.resultsName and retToks.haskeys():
return [retToks]
else:
return retToks
class Group(TokenConverter):
"""
Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Optional(delimitedList(term))
print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
func = ident + Group(Optional(delimitedList(term)))
print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
"""
def __init__(self, expr):
super(Group, self).__init__(expr)
self.saveAsList = True
def postParse(self, instring, loc, tokenlist):
return [tokenlist]
class Dict(TokenConverter):
"""
Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
Useful for tabular report scraping when the first column can be used as an item key.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
# print attributes as plain groups
print(OneOrMore(attr_expr).parseString(text).dump())
# instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.asDict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at L{ParseResults} of accessing fields by results name.
"""
def __init__(self, expr):
super(Dict, self).__init__(expr)
self.saveAsList = True
def postParse(self, instring, loc, tokenlist):
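# Descriptive note (added): each sub-list's first token becomes its results
# name; a single remaining token is stored as a scalar value, several as a group.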
for i, tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey, int):
ikey = _ustr(tok[0]).strip()
if len(tok) == 1:
tokenlist[ikey] = _ParseResultsWithOffset("", i)
elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
else:
dictvalue = tok.copy() # ParseResults(i)
del dictvalue[0]
if len(dictvalue) != 1 or (
isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
if self.resultsName:
return [tokenlist]
else:
return tokenlist
class Suppress(TokenConverter):
"""
Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + ZeroOrMore(',' + wd)
print(wd_list1.parseString(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
print(wd_list2.parseString(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
(See also L{delimitedList}.)
"""
def postParse(self, instring, loc, tokenlist):
return []
def suppress(self):
return self
class OnlyOnce(object):
"""
Wrapper for parse actions, to ensure they are only called once.
"""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self, s, l, t):
if not self.called:
results = self.callable(s, l, t)
self.called = True
return results
raise ParseException(s, l, "")
def reset(self):
self.called = False
def traceParseAction(f):
"""
Decorator for debugging parse actions.
When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})"}.
When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s, l, t = paArgs[-3:]
if len(paArgs) > 3:
thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
sys.stderr.write(
">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t)
)
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc))
raise
sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret))
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList(expr, delim=",", combine=False):
"""
Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..."
if combine:
return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName)
else:
return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName)
def countedArray(expr, intExpr=None):
"""
Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
The matched tokens are returned as a list of the expr tokens - the leading count token is suppressed.
If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
Example::
countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
"""
arrayExpr = Forward()
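# Descriptive note (added): the Forward below is re-bound inside the count
# field's parse action, so once the leading integer has been read, arrayExpr
# expects exactly that many copies of expr.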
def countFieldParseAction(s, l, t):
n = t[0]
arrayExpr << (n and Group(And([expr] * n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t: int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return (intExpr + arrayExpr).setName("(len) " + _ustr(expr) + "...")
def _flatten(L):
ret = []
for i in L:
if isinstance(i, list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s, l, t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName("(prev) " + _ustr(expr))
return rep
def matchPreviousExpr(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s, l, t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s, l, t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("", 0, "")
rep.setParseAction(mustMatchTheseTokens, callDuringTry=True)
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName("(prev) " + _ustr(expr))
return rep
def _escapeRegexRangeChars(s):
# ~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c, _bslash + c)
s = s.replace("\n", r"\n")
s = s.replace("\t", r"\t")
return _ustr(s)
def oneOf(strs, caseless=False, useRegex=True):
"""
Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of string literals
- caseless - (default=C{False}) - treat all literals as caseless
- useRegex - (default=C{True}) - as an optimization, will generate a Regex
object; otherwise, a C{MatchFirst} object is generated (a C{MatchFirst} is also
used if C{caseless=True}, or if creating the C{Regex} raises an exception)
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if caseless:
isequal = lambda a, b: a.upper() == b.upper()
masks = lambda a, b: b.upper().startswith(a.upper())
parseElementClass = CaselessLiteral
else:
isequal = lambda a, b: a == b
masks = lambda a, b: b.startswith(a)
parseElementClass = Literal
symbols = []
if isinstance(strs, basestring):
symbols = strs.split()
elif isinstance(strs, collections.Iterable):
symbols = list(strs)
else:
warnings.warn(
"Invalid argument to oneOf, expected string or iterable",
SyntaxWarning,
stacklevel=2,
)
if not symbols:
return NoMatch()
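# Descriptive note (added): the loop below removes duplicate symbols and, when
# one symbol is a prefix ("mask") of a later one, moves the longer symbol
# ahead so that longest-match behavior is preserved in the MatchFirst.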
i = 0
while i < len(symbols) - 1:
cur = symbols[i]
for j, other in enumerate(symbols[i + 1 :]):
if isequal(other, cur):
del symbols[i + j + 1]
break
elif masks(cur, other):
del symbols[i + j + 1]
symbols.insert(i, other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
# ~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols) == len("".join(symbols)):
return Regex(
"[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)
).setName(" | ".join(symbols))
else:
return Regex("|".join(re.escape(sym) for sym in symbols)).setName(
" | ".join(symbols)
)
except Exception:
warnings.warn(
"Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning,
stacklevel=2,
)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(
" | ".join(symbols)
)
def dictOf(key, value):
"""
Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict(ZeroOrMore(Group(key + value)))
def originalTextFor(expr, asString=True):
"""
Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
input text. By default, returns a string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{L{ParseResults}} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s, loc, t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s, l, t: s[t._original_start : t._original_end]
else:
def extractText(s, l, t):
t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr
def ungroup(expr):
"""
Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty.
"""
return TokenConverter(expr).setParseAction(lambda t: t[0])
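# A minimal usage sketch for ungroup (the expression names here are illustrative only):
#   inner = Group(Word(alphas) + Word(nums))
#   ungroup(inner).parseString("abc 123")   # yields ['abc', '123'] rather than [['abc', '123']]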
def locatedExpr(expr):
"""
Helper to decorate a returned token with its starting and ending locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
Be careful if the input text contains C{<TAB>} characters, you may want to call
C{L{ParserElement.parseWithTabs}}
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().setParseAction(lambda s, l, t: l)
return Group(
locator("locn_start")
+ expr("value")
+ locator.copy().leaveWhitespace()("locn_end")
)
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(
lambda s, l, t: t[0][1]
)
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(
lambda s, l, t: unichr(int(t[0].lstrip(r"\0x"), 16))
)
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(
lambda s, l, t: unichr(int(t[0][1:], 8))
)
_singleChar = (
_escapedPunc
| _escapedHexChar
| _escapedOctChar
| Word(printables, excludeChars=r"\]", exact=1)
| Regex(r"\w", re.UNICODE)
)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = (
Literal("[")
+ Optional("^").setResultsName("negate")
+ Group(OneOrMore(_charRange | _singleChar)).setResultsName("body")
+ "]"
)
def srange(s):
r"""
Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as C{\-} or C{\]})
- an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
(C{\0x##} is also supported for backwards compatibility)
- an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
- a range of any of the above, separated by a dash (C{'a-z'}, etc.)
- any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
"""
_expanded = (
lambda p: p
if not isinstance(p, ParseResults)
else "".join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
)
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except Exception:
return ""
def matchOnlyAtCol(n):
"""
Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg, locn, toks):
if col(locn, strg) != n:
raise ParseException(strg, locn, "matched token not at column %d" % n)
return verifyCol
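# A minimal usage sketch (illustrative only): only match a word that starts in column 8
#   col8_word = Word(alphas).setParseAction(matchOnlyAtCol(8))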
def replaceWith(replStr):
"""
Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{L{transformString<ParserElement.transformString>}()}.
Example::
num = Word(nums).setParseAction(lambda toks: int(toks[0]))
na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
term = na | num
OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s, l, t: [replStr]
def removeQuotes(s, l, t):
"""
Helper parse action for removing quotation marks from parsed quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def tokenMap(func, *args):
"""
    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
args are passed, they are forwarded to the given function as additional arguments after
the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
parsed data to an integer using base 16.
    Example (compare the last example to the one in L{ParserElement.transformString})::
hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
hex_ints.runTests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).setParseAction(tokenMap(str.upper))
OneOrMore(upperword).runTests('''
my kingdom for a horse
''')
wd = Word(alphas).setParseAction(tokenMap(str.title))
OneOrMore(wd).setParseAction(' '.join).runTests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s, l, t):
return [func(tokn, *args) for tokn in t]
try:
func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
except Exception:
func_name = str(func)
pa.__name__ = func_name
return pa
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr, basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas, alphanums + "_-:")
if xml:
tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)
openTag = (
Suppress("<")
+ tagStr("tag")
+ Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
+ Optional("/", default=[False])
.setResultsName("empty")
.setParseAction(lambda s, l, t: t[0] == "/")
+ Suppress(">")
)
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(
printablesLessRAbrack
)
openTag = (
Suppress("<")
+ tagStr("tag")
+ Dict(
ZeroOrMore(
Group(
tagAttrName.setParseAction(downcaseTokens)
+ Optional(Suppress("=") + tagAttrValue)
)
)
)
+ Optional("/", default=[False])
.setResultsName("empty")
.setParseAction(lambda s, l, t: t[0] == "/")
+ Suppress(">")
)
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName(
"start" + "".join(resname.replace(":", " ").title().split())
).setName("<%s>" % resname)
closeTag = closeTag.setResultsName(
"end" + "".join(resname.replace(":", " ").title().split())
).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
# makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
a,a_end = makeHTMLTags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.searchString(text):
# attributes in the <A> tag (like "href" shown here) are also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> http://pyparsing.wikispaces.com
"""
return _makeTags(tagStr, False)
def makeXMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
tags only in the given upper/lower case.
Example: similar to L{makeHTMLTags}
"""
return _makeTags(tagStr, True)
def withAttribute(*args, **attrDict):
"""
Helper to create a validating parse action to be used with start tags created
with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
of filter attributes names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k, v) for k, v in attrs]
def pa(s, l, tokens):
for attrName, attrValue in attrs:
if attrName not in tokens:
raise ParseException(s, l, "no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(
s,
l,
"attribute '%s' has value '%s', must be '%s'"
% (attrName, tokens[attrName], attrValue),
)
return pa
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=""):
"""
Simplified version of C{L{withAttribute}} when matching on a div class - made
difficult because C{class} is a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr: classname})
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation(baseExpr, opList, lpar=Suppress("("), rpar=Suppress(")")):
"""
Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions. The generated parser will also recognize the use
of parentheses to override operator precedences (see example below).
Note: if you define a deep operator list, you may see performance issues
when using infixNotation. See L{ParserElement.enablePackrat} for a
mechanism to potentially improve your parser performance.
Parameters:
    - baseExpr - expression representing the most basic element of the nested expression
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
- lpar - expression for matching left-parentheses (default=C{Suppress('(')})
- rpar - expression for matching right-parentheses (default=C{Suppress(')')})
Example::
# simple example of four-function arithmetic with ints and variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infixNotation(integer | varname,
[
('-', 1, opAssoc.RIGHT),
(oneOf('* /'), 2, opAssoc.LEFT),
(oneOf('+ -'), 2, opAssoc.LEFT),
])
arith_expr.runTests('''
5+3*6
(5+3)*6
-2--11
''', fullDump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
ret = Forward()
lastExpr = baseExpr | (lpar + ret + rpar)
for i, operDef in enumerate(opList):
opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError(
"if numterms=3, opExpr must be a tuple or list of two expressions"
)
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group(
lastExpr + OneOrMore(opExpr)
)
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group(
lastExpr + OneOrMore(opExpr + lastExpr)
)
else:
matchExpr = FollowedBy(lastExpr + lastExpr) + Group(
lastExpr + OneOrMore(lastExpr)
)
elif arity == 3:
matchExpr = FollowedBy(
lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
) + Group(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr)
else:
raise ValueError(
"operator must be unary (1), binary (2), or ternary (3)"
)
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group(
opExpr + thisExpr
)
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group(
lastExpr + OneOrMore(opExpr + thisExpr)
)
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group(
lastExpr + OneOrMore(thisExpr)
)
elif arity == 3:
matchExpr = FollowedBy(
lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
else:
raise ValueError(
"operator must be unary (1), binary (2), or ternary (3)"
)
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
matchExpr.setParseAction(pa)
thisExpr <<= matchExpr.setName(termName) | lastExpr
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
dblQuotedString = Combine(
Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
).setName("string enclosed in double quotes")
sglQuotedString = Combine(
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).setName("string enclosed in single quotes")
quotedString = Combine(
Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
| Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).setName("quotedString using single or double quotes")
unicodeString = Combine(_L("u") + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""
Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
- closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
- content - expression for items within the nested lists (default=C{None})
- ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
Example::
data_type = oneOf("void int short long char float double")
decl_data_type = Combine(data_type + Optional(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR,RPAR = map(Suppress, "()")
code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(cStyleComment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.searchString(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener, basestring) and isinstance(closer, basestring):
if len(opener) == 1 and len(closer) == 1:
if ignoreExpr is not None:
content = Combine(
OneOrMore(
~ignoreExpr
+ CharsNotIn(
opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
exact=1,
)
)
).setParseAction(lambda t: t[0].strip())
else:
content = empty.copy() + CharsNotIn(
opener + closer + ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t: t[0].strip())
else:
if ignoreExpr is not None:
content = Combine(
OneOrMore(
~ignoreExpr
+ ~Literal(opener)
+ ~Literal(closer)
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
)
).setParseAction(lambda t: t[0].strip())
else:
content = Combine(
OneOrMore(
~Literal(opener)
+ ~Literal(closer)
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
)
).setParseAction(lambda t: t[0].strip())
else:
raise ValueError(
"opening and closing arguments must be strings if no content expression is given"
)
ret = Forward()
if ignoreExpr is not None:
ret <<= Group(
Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
)
else:
ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
ret.setName("nested %s%s expression" % (opener, closer))
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""
Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond the
      current level; set to False for a block of left-most statements
(default=C{True})
A valid block must contain at least one C{blockStatement}.
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group( funcDecl + func_body )
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << ( funcDef | assignment | identifier )
module_body = OneOrMore(stmt)
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
def checkPeerIndent(s, l, t):
if l >= len(s):
return
curCol = col(l, s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s, l, "illegal nesting")
raise ParseException(s, l, "not a peer entry")
def checkSubIndent(s, l, t):
curCol = col(l, s)
if curCol > indentStack[-1]:
indentStack.append(curCol)
else:
raise ParseException(s, l, "not a subentry")
def checkUnindent(s, l, t):
if l >= len(s):
return
curCol = col(l, s)
if not (indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s, l, "not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName("INDENT")
PEER = Empty().setParseAction(checkPeerIndent).setName("")
UNDENT = Empty().setParseAction(checkUnindent).setName("UNINDENT")
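    # INDENT pushes a new, deeper column onto indentStack, PEER requires each statement
    # to start at the current column, and UNDENT pops the stack when the column decreases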
if indent:
smExpr = Group(
Optional(NL)
+
# ~ FollowedBy(blockStatementExpr) +
INDENT
+ (OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL)))
+ UNDENT
)
else:
smExpr = Group(
Optional(NL) + (OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL)))
)
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName("indented block")
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag, anyCloseTag = makeHTMLTags(
Word(alphas, alphanums + "_:").setName("any tag")
)
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), "><& \"'"))
commonHTMLEntity = Regex(
"&(?P<entity>" + "|".join(_htmlEntityMap.keys()) + ");"
).setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
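# Typical usage (a sketch): attach as a parse action, then transform, e.g.
#   commonHTMLEntity.setParseAction(replaceHTMLEntity)
#   commonHTMLEntity.transformString("2 &gt; 1")   # -> '2 > 1'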
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").setName(
"C style comment"
)
"Comment of the form C{/* ... */}"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"
cppStyleComment = Combine(
Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dblSlashComment
).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"
_commasepitem = (
Combine(
OneOrMore(
Word(printables, excludeChars=",")
+ Optional(Word(" \t") + ~Literal(",") + ~LineEnd())
)
)
.streamline()
.setName("commaItem")
)
commaSeparatedList = delimitedList(
Optional(quotedString.copy() | _commasepitem, default="")
).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""
Here are some common low-level expressions that may be useful in jump-starting parser development:
- numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
- common L{programming identifiers<identifier>}
- network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
- ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
- L{UUID<uuid>}
- L{comma-separated list<comma_separated_list>}
Parse actions:
- C{L{convertToInteger}}
- C{L{convertToFloat}}
- C{L{convertToDate}}
- C{L{convertToDatetime}}
- C{L{stripHTMLTags}}
- C{L{upcaseTokens}}
- C{L{downcaseTokens}}
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convertToInteger = tokenMap(int)
"""
Parse action for converting parsed integers to Python int
"""
convertToFloat = tokenMap(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).setName("integer").setParseAction(convertToInteger)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16))
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = (
Regex(r"[+-]?\d+").setName("signed integer").setParseAction(convertToInteger)
)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (
signed_integer().setParseAction(convertToFloat)
+ "/"
+ signed_integer().setParseAction(convertToFloat)
).setName("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.addParseAction(lambda t: t[0] / t[-1])
mixed_integer = (
fraction | signed_integer + Optional(Optional("-").suppress() + fraction)
).setName("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.addParseAction(sum)
real = Regex(r"[+-]?\d+\.\d*").setName("real number").setParseAction(convertToFloat)
"""expression that parses a floating point number and returns a float"""
sci_real = (
Regex(r"[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)")
.setName("real number with scientific notation")
.setParseAction(convertToFloat)
)
"""expression that parses a floating point number with optional scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = (
Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
.setName("fnumber")
.setParseAction(convertToFloat)
)
"""any int or real number, returned as float"""
identifier = Word(alphas + "_", alphanums + "_").setName("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(
r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
).setName("IPv4 address")
"IPv4 address (C{0.0.0.0 - 255.255.255.255})"
_ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").setName("hex_integer")
_full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).setName(
"full IPv6 address"
)
_short_ipv6_address = (
Optional(_ipv6_part + (":" + _ipv6_part) * (0, 6))
+ "::"
+ Optional(_ipv6_part + (":" + _ipv6_part) * (0, 6))
).setName("short IPv6 address")
_short_ipv6_address.addCondition(
lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
)
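    # a short-form ("::") address is only valid if it expands to fewer than 8 hex groups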
_mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
ipv6_address = Combine(
(_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName(
"IPv6 address"
)
).setName("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(
r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
).setName("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convertToDate(fmt="%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.setParseAction(pyparsing_common.convertToDate())
print(date_expr.parseString("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(s, l, t):
try:
return datetime.strptime(t[0], fmt).date()
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
@staticmethod
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
"""
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.setParseAction(pyparsing_common.convertToDatetime())
print(dt_expr.parseString("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s, l, t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(
r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
).setName("ISO8601 date")
"ISO8601 date (C{yyyy-mm-dd})"
iso8601_datetime = Regex(
r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
).setName("ISO8601 datetime")
"ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").setName("UUID")
"UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
_html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
@staticmethod
def stripHTMLTags(s, l, tokens):
"""
Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
td,td_end = makeHTMLTags("TD")
table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
"""
return pyparsing_common._html_stripper.transformString(tokens[0])
_commasepitem = (
Combine(
OneOrMore(
~Literal(",")
+ ~LineEnd()
+ Word(printables, excludeChars=",")
+ Optional(White(" \t"))
)
)
.streamline()
.setName("commaItem")
)
comma_separated_list = delimitedList(
Optional(quotedString.copy() | _commasepitem, default="")
).setName("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
"""Parse action to convert tokens to upper case."""
downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
"""Parse action to convert tokens to lower case."""
if __name__ == "__main__":
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = "*" | columnNameList
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = (
selectToken("command")
+ columnSpec("columns")
+ fromToken
+ tableNameList("tables")
)
# demo runTests method, including embedded comments in test string
simpleSQL.runTests(
"""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
"""
)
pyparsing_common.number.runTests(
"""
100
-100
+100
3.14159
6.02e23
1e-12
"""
)
# any int or real number, returned as float
pyparsing_common.fnumber.runTests(
"""
100
-100
+100
3.14159
6.02e23
1e-12
"""
)
pyparsing_common.hex_integer.runTests(
"""
100
FF
"""
)
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests(
"""
12345678-1234-5678-1234-567812345678
"""
)
| [
"[email protected]"
] | |
2426d938b5176cd170534198a1082ae7f41d8643 | 0e4860fecfdd34a3255003cc8c8df086c14083dd | /python/source_code/source_code_of_lp3thw/ex41.py | da1766ed9d59b79276ec183aa6177f3cb3732590 | [] | no_license | anzhihe/learning | 503ab9a58f280227011da5eaa4b14b46c678e6f3 | 66f7f801e1395207778484e1543ea26309d4b354 | refs/heads/master | 2023-08-08T11:42:11.983677 | 2023-07-29T09:19:47 | 2023-07-29T09:19:47 | 188,768,643 | 1,443 | 617 | null | 2023-08-24T02:10:34 | 2019-05-27T04:04:10 | Python | UTF-8 | Python | false | false | 6,262 | py | '''
# Keyword drill:
class: tells Python that you want to make a new type of thing.
object: has two meanings: first, the most basic type of thing; second, any instance of some thing.
instance: what you get when you tell Python to create a class.
def: how you define a function inside a class.
self: inside the functions of a class, self is a variable through which the instance/object can be accessed.
inheritance: the concept that one class can inherit traits from another class, just like you and your parents.
composition: the concept that one class can be composed of several other classes, the way a car has four wheels.
attribute: a property that a class has, usually obtained through composition, and usually a variable.
is-a: a phrase to say that something inherits from another thing, as in "a salmon is-a fish".
has-a: a phrase to say that something is composed of other things or has a trait, as in "a salmon has-a mouth".
You had better make some flash cards to memorize these.
# Phrase drill:
1. class X(Y): make a class named X that is-a Y (for example, a Salmon class that is-a Fish).
2. class X(object): def __init__(self, J): class X has-a __init__ that takes self and J parameters.
3. foo = X(): set foo to an instance of class X.
4. foo.M(J): from foo, get the M function, and call it with parameters self and J.
5. foo.K = Q: from foo, get the K attribute, and set it to Q.
In all of the above, when you see X, Y, M, J, K, Q, and foo, you can treat them like blank spots. For example, you can also write them this way:
1. "Make a class named ??? that is-a ???."
2. "class ??? has-a __init__ that takes self and ??? parameters."
3. "class ??? has-a function named ??? that takes self and ??? parameters."
4. "Set foo to an instance of class ???."
5. "From foo, get the ??? function, and call it with self=??? and parameters ???."
6. "From foo, get the ??? attribute, and set it to ???."
# Combined drill:
1. Take the phrase cards and drill with them.
2. Practice, then practice again.
# A reading test
I prepared a small hack of code to drill you. Here is the code; you should type it into oop_test.py to use it.
# Below is the code of ex41.py:
'''
import random
from urllib.request import urlopen
import sys
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
# What's written below reads like garbage. Verdict rendered! 20180319
PHRASES = {
"class %%%(%%%):":
"Make a class named %%% that is-a %%%.",
"class %%%(object):\n\tdef __init__(self, ***)":
"class %%% has-a __init__ that takes self and *** parameters.",
"class %%%(object):\n\tdef ***(self, @@@)":#下面代码里没有
"class %%% has-a function *** that takes self and @@@",#下面代码里没有
"*** = %%%()":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function, call it with parameters self,@@@.",
"***.*** = '***'":
"From *** get the *** attribute and set it to '***'."
}
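# In PHRASES, each key is a code snippet and each value is its English reading;
# %%% stands for class names, *** for other names, and @@@ for parameter lists.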
'''
The code below comes from http://blog.csdn.net/github_37430159/article/details/54808102
'''
# do they want to drill phrases first
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASE_FIRST = True
else:
PHRASE_FIRST = False
# load up the words from the website
for word in urlopen(WORD_URL).readlines():
WORDS.append(str(word.strip(),encoding = 'utf-8'))
def convert(snippet, phrase):
class_names = [w.capitalize() for w in random.sample(WORDS, snippet.count("%%%"))]
other_names = random.sample(WORDS, snippet.count("***"))
results =[]
param_names = []
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1,3)
param_names.append(','.join(random.sample(WORDS, param_count)))
for sentence in snippet, phrase:
result = sentence[:]
        # fill in fake class names
for word in class_names:
result = result.replace("%%%", word, 1)
        # fill in fake other names
for word in other_names:
result = result.replace("***", word, 1)
        # fill in fake parameter lists
for word in param_names:
result = result.replace("@@@", word, 1)
results.append(result)
return results
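# For example (the output words are random), convert() might turn the snippet
# "*** = %%%()" into something like "cheese = Park()" and its phrase into
# "Set cheese to an instance of class Park." (illustrative values only)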
# keep going until they hit CTRL-D
try:
while True:
snippets = list(PHRASES.keys())
random.shuffle(snippets)
for snippet in snippets:
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASE_FIRST:
question, answer = answer, question
print(question)
input("> ")
print(f"ANSWER: {answer}\n\n")
except EOFError:
print("\nBye")
'''
20180318 code error:
bogon:lp3thw yyy$ python ex41.py
File "ex41.py", line 63
"class %%%(object):\n\tdef ***(self, @@@)":
^
SyntaxError: invalid syntax
Not resolved yet.
'''
# 一些观点20180319:
'''
Reading ex40 and ex41 made me sick, and I still could not figure out what this author was on about. Once I opened the great Liao Xuefeng's website, I finally understood the concepts of class and instance.
# Summary: the most important concepts in object-oriented programming are the class (Class) and the instance (Instance). Remember that a class is an abstract template, e.g. a Student class, while an instance is a concrete "object" created from the class; every object has the same methods, but each object's data may differ.
# References: 1. https://www.liaoxuefeng.com/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000/0014318645694388f1f10473d7f416e9291616be8367ab5000
# 2. https://www.liaoxuefeng.com/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000/001431864715651c99511036d884cf1b399e65ae0d27f7e000
# Also: Liao Xuefeng's site is super cool. Building your own knowledge structure in wiki form like that is a great thing. How could I do the same?
'''
# LOG: 20180319 I admit I am about to lose my mind. Even though I understood Liao Xuefeng's class material this morning, opening LP3THW left me completely lost again. The code passed debugging tonight, yet I was still sweating all over. What on earth is this author writing!!
| [
"[email protected]"
] | |
c292d4351f077c622810d312a769321ec9aaf9cc | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/I_to_M_Gk3_no_pad/pyramid_size256/pyr_0s/bce_s001_tv_s0p1_L6/step10_a.py | c878c87ed7d82d627aa11997e884698d36ad3924 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,896 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                       ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")                ### split the path; next we find which layer kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")          ### find which layer kong_model2 sits at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### locate the kong_model2 dir
import sys                                                       ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer   ### the -1 in the middle converts a length into an index
# print("    kong_to_py_layer:", kong_to_py_layer)
if  (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] was meant to strip the "step1x_" prefix; later I felt meaningful names could stay, so it became 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] was meant to strip the "mask_" prefix (added because a python module name cannot start with a digit); later the automatic ordering seemed acceptable, so it became 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("    template_dir:", template_dir)  ### for example, template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_0side_L6 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir decides the folder one level above result_dir. A nested exp_dir is fine too.
For example, with exp_dir = "6_mask_unet/some_name_you_chose", every result_dir lives under:
    6_mask_unet/some_name_you_chose/result_a
    6_mask_unet/some_name_you_chose/result_b
    6_mask_unet/some_name_you_chose/...
'''
use_db_obj = type9_mask_flow_have_bg_dtd_hdr_mix_and_paper
use_loss_obj = [G_bce_s001_loss_info_builder.set_loss_target("UNet_Mask").copy()]  ### the z, y, x order corresponds to step07_b_0b_Multi_UNet
#############################################################
### build an empty Exp_builder so that result_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_img_resize( (256, 256) ).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_0side = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_img_resize( (256, 256) ).set_result_name(result_name="L6_ch032_bl_pyr_-20220403_012819")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### Press F5 directly, or run "python step10_b1_exp_obj_load_and_train_and_test.py" with nothing appended, so execution does not fall through to the code below meant for step10_b_subprocess.py~~~
ch032_0side.build().run()
# print('no argument')
sys.exit()
    ### the part below is for step10_b_subprocess.py; it is equivalent to running "python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()" from cmd
eval(sys.argv[1])
| [
"[email protected]"
] | |
d27dace65bdd94863a402b1a7e7a1fb8e6f2467c | 48122a5eca895dd926e1568e143babb6cfbef3a8 | /pyunit_address/__init__.py | d8c1e5f1e58e91c8350b9c9578bb031ad3b4be4d | [] | no_license | pyunits/pyunit-address | 5994b640cf837e5167bc1a97d17d83d440c6f2fd | f754285feaaf136c802aaf4b8b554783e50262fb | refs/heads/master | 2023-04-01T21:32:47.223824 | 2021-03-31T09:54:40 | 2021-03-31T09:54:40 | 242,957,473 | 15 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2020/2/18 15:23
# @Author: [email protected]
from .address import Address  # address initialization
from .addressType import AddressType  # get the address type
from .correctionAddress import correct_address  # correct an address
from .findAddress import find_address  # look up addresses
from .supplementAddress import supplement_address  # complete a partial address
from .tool import *
__version__ = '2021.3.31'
__author__ = 'Jtyoui'
__description__ = '全国五级地址查询'  # "Nationwide five-level address lookup"
__email__ = '[email protected]'
__names__ = 'pyUnit_address'
__url__ = 'https://github.com/PyUnit/pyunit-address'
| [
"[email protected]"
] | |
3b4419a56b551d58042ccc953eecbe24d80c9d9a | c4a3eeabe660e5d6b42f704d0325a755331ab3c5 | /hyperion/setup_hyperion_old.py | 8a8e354ec45d8ae1005859b0afad19f86c6726c6 | [] | no_license | yaolun/misc | dfcfde2ac4a6429201644e1354912d3a064f9524 | 049b68ce826ddf638cec9a3b995d9ee84bf6075a | refs/heads/master | 2021-01-21T23:54:08.953071 | 2018-06-02T19:46:18 | 2018-06-02T19:46:18 | 26,666,071 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,602 | py | def setup_model(outdir,record_dir,outname,params,dust_file,tsc=True,idl=False,plot=False,\
low_res=True,flat=True,scale=1,radmc=False,mono=False,record=True,dstar=178.,\
aperture=None,dyn_cav=False,fix_params=None,alma=False,power=2,better_im=False,ellipsoid=False,\
TSC_dir='~/programs/misc/TSC/', IDL_path='/Applications/exelis/idl83/bin/idl',auto_disk=0.25):
"""
    params: dictionary of the model parameters
    (the "alma" keyword is obsolete)
    outdir: the directory for storing the Hyperion input files
    record_dir: the directory containing "model_list.txt" for recording parameters
    TSC_dir: the path to the TSC-related IDL routines
    IDL_path: the IDL executable
"""
import numpy as np
import astropy.constants as const
import scipy as sci
# to avoid X server error
import matplotlib as mpl
mpl.use('Agg')
#
import matplotlib.pyplot as plt
import os
from matplotlib.colors import LogNorm
from scipy.integrate import nquad
from hyperion.model import Model
from record_hyperion import record_hyperion
from outflow_inner_edge import outflow_inner_edge
from pprint import pprint
# import pdb
# pdb.set_trace()
# Constants setup
c = const.c.cgs.value
AU = 1.49598e13 # Astronomical Unit [cm]
pc = 3.08572e18 # Parsec [cm]
MS = 1.98892e33 # Solar mass [g]
LS = 3.8525e33 # Solar luminosity [erg/s]
RS = 6.96e10 # Solar radius [cm]
G = 6.67259e-8 # Gravitational constant [cm3/g/s^2]
yr = 60*60*24*365 # Years in seconds
PI = np.pi # PI constant
sigma = const.sigma_sb.cgs.value # Stefan-Boltzmann constant
mh = const.m_p.cgs.value + const.m_e.cgs.value
g2d = 100.
mmw = 2.37 # Kauffmann 2008
m = Model()
# Create dust properties
# Hyperion needs nu, albedo, chi, g, p_lin_max
from hyperion.dust import HenyeyGreensteinDust
# Read in the dust opacity table used by RADMC-3D
dust = dict()
# [dust_radmc['wl'], dust_radmc['abs'], dust_radmc['scat'], dust_radmc['g']] = np.genfromtxt(dust_file,skip_header=2).T
[dust['nu'], dust['albedo'], dust['chi'], dust['g']] = np.genfromtxt(dust_file).T
# opacity per mass of dust?
# dust_hy = dict()
# dust_hy['nu'] = c/dust_radmc['wl']*1e4
# ind = np.argsort(dust_hy['nu'])
# dust_hy['nu'] = dust_hy['nu'][ind]
# dust_hy['albedo'] = (dust_radmc['scat']/(dust_radmc['abs']+dust_radmc['scat']))[ind]
# dust_hy['chi'] = (dust_radmc['abs']+dust_radmc['scat'])[ind]
# dust_hy['g'] = dust_radmc['g'][ind]
# dust_hy['p_lin_max'] = 0*dust_radmc['wl'][ind] # assume no polarization
# d = HenyeyGreensteinDust(dust_hy['nu'], dust_hy['albedo'], dust_hy['chi'], dust_hy['g'], dust_hy['p_lin_max'])
d = HenyeyGreensteinDust(dust['nu'], dust['albedo'], dust['chi'], dust['g'], dust['g']*0)
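    # the four columns of the dust table are frequency, albedo, total opacity (chi),
    # and the scattering asymmetry parameter g; polarization is set to zero here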
# dust sublimation option
d.set_sublimation_temperature('slow', temperature=1600.0)
d.set_lte_emissivities(n_temp=3000,
temp_min=0.1,
temp_max=2000.)
# try to solve the freq. problem
d.optical_properties.extrapolate_nu(3.28e15, 4e15)
#
d.write(outdir+os.path.basename(dust_file).split('.')[0]+'.hdf5')
d.plot(outdir+os.path.basename(dust_file).split('.')[0]+'.png')
plt.clf()
# Grids and Density
# Calculation inherited from the script used for RADMC-3D
# Grid Parameters
nx = 300L
if low_res == True:
nx = 100L
ny = 400L
nz = 50L
[nx, ny, nz] = [int(scale*nx), int(scale*ny), int(scale*nz)]
# TSC model input setting
# params = np.genfromtxt(indir+'/tsc_params.dat', dtype=None)
dict_params = params # input_reader(params_file)
# TSC model parameter
cs = dict_params['Cs']*1e5
t = dict_params['age'] # year
omega = dict_params['Omega0']
# calculate related parameters
M_env_dot = 0.975*cs**3/G
mstar = M_env_dot * t * yr
R_cen = omega**2 * G**3 * mstar**3 /(16*cs**8)
R_inf = cs * t * yr
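    # A sketch of the assumed physics (Shu 1977 / TSC-style relations, matching the lines above):
    #   M_env_dot ~ 0.975 c_s^3 / G                      (collapse accretion rate)
    #   M_star    = M_env_dot * t                        (mass accreted after age t)
    #   R_cen     = Omega^2 G^3 M_star^3 / (16 c_s^8)    (centrifugal radius)
    #   R_inf     = c_s * t                              (radius of the collapse front)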
# M_env_dot = dict_params['M_env_dot']*MS/yr
# R_cen = dict_params['R_cen']*AU
# R_inf = dict_params['R_inf']*AU
# protostar parameter
tstar = dict_params['tstar']
R_env_max = dict_params['R_env_max']*AU
theta_cav = dict_params['theta_cav']
rho_cav_center = dict_params['rho_cav_center']
rho_cav_edge = dict_params['rho_cav_edge']*AU
rstar = dict_params['rstar']*RS
# Mostly fixed parameter
M_disk = dict_params['M_disk']*MS
beta = dict_params['beta']
h100 = dict_params['h100']*AU
rho_cav = dict_params['rho_cav']
# make M_disk varies with mstar, which is the mass of star+disk
if auto_disk != None:
if M_disk != 0:
print 'M_disk is reset to %4f of mstar (star+disk)' % auto_disk
M_disk = mstar * auto_disk
else:
print 'M_disk = 0 is found. M_disk is set to 0.'
# ellipsoid cavity parameter
if ellipsoid == True:
a_out = 130 * 178. * AU
b_out = 50 * 178. * AU
z_out = a_out
# a_in = 77.5 * 178. * AU
# b_in = 30 * 178. * AU
a_in = dict_params['a_in'] * 178. * AU
b_in = a_in/a_out*b_out
z_in = a_in
# rho_cav_out = 1e4 * mh
# rho_cav_in = 1e3 * mh
rho_cav_out = dict_params['rho_cav_out'] * mh
rho_cav_in = dict_params['rho_cav_in'] * mh
# Calculate the dust sublimation radius
T_sub = 1600
a = 1 #in micron
# realistic dust
# d_sub = 2.9388e7*(a/0.1)**-0.2 * (4*np.pi*rstar**2*sigma*tstar**4/LS)**0.5 / T_sub**3 *AU
# black body dust
d_sub = (LS/16./np.pi/sigma/AU**2*(4*np.pi*rstar**2*sigma*tstar**4/LS)/T_sub**4)**0.5 *AU
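    # the blackbody estimate assumes radiative equilibrium, T(r) = (L / (16 pi sigma r^2))**0.25;
    # setting T = T_sub and solving for r gives d_sub = sqrt(L / (16 pi sigma)) / T_sub**2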
# use the dust sublimation radius as the inner radius of disk and envelope
R_disk_min = d_sub
R_env_min = d_sub
rin = rstar
rout = R_env_max
R_disk_max = R_cen
# Do the variable conversion
# cs = (G * M_env_dot / 0.975)**(1/3.) # cm/s
# t = R_inf / cs / yr # in year
# mstar = M_env_dot * t * yr
# omega = (R_cen * 16*cs**8 / (G**3 * mstar**3))**0.5
# print the variables for radmc3d
print 'Dust sublimation radius %6f AU' % (d_sub/AU)
print 'M_star %4f Solar mass' % (mstar/MS)
print 'Infall radius %4f AU' % (R_inf / AU)
# if there is any parameter found in fix_params, then fix them
if fix_params != None:
if 'R_min' in fix_params.keys():
R_disk_min = fix_params['R_min']*AU
R_env_min = fix_params['R_min']*AU
# Make the Coordinates
#
ri = rin * (rout/rin)**(np.arange(nx+1).astype(dtype='float')/float(nx))
ri = np.hstack((0.0, ri))
thetai = PI*np.arange(ny+1).astype(dtype='float')/float(ny)
phii = PI*2.0*np.arange(nz+1).astype(dtype='float')/float(nz)
# Keep the constant cell size in r-direction at large radii
#
if flat == True:
ri_cellsize = ri[1:-1]-ri[0:-2]
ind = np.where(ri_cellsize/AU > 100.0)[0][0] # The largest cell size is 100 AU
ri = np.hstack((ri[0:ind],ri[ind]+np.arange(np.ceil((rout-ri[ind])/100/AU))*100*AU))
nxx = nx
nx = len(ri)-1
# Assign the coordinates of the center of cell as its coordinates.
#
rc = 0.5*( ri[0:nx] + ri[1:nx+1] )
thetac = 0.5*( thetai[0:ny] + thetai[1:ny+1] )
phic = 0.5*( phii[0:nz] + phii[1:nz+1] )
# phic = 0.5*( phii[0:nz-1] + phii[1:nz] )
# Make the dust density model
# Make the density profile of the envelope
#
total_mass = 0
if tsc == False:
print 'Calculating the dust density profile with infall solution...'
if theta_cav != 0:
# c0 = R_env_max**(-0.5)*np.sqrt(1/np.sin(np.radians(theta_cav))**3-1/np.sin(np.radians(theta_cav)))
# using R = 10000 AU as the reference point
c0 = (10000.*AU)**(-0.5)*np.sqrt(1/np.sin(np.radians(theta_cav))**3-1/np.sin(np.radians(theta_cav)))
else:
c0 = 0
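        # c0 sets the curved cavity wall z = c0 * w**1.5; cells with |z| above the
        # wall are treated as lying inside the outflow cavity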
rho_env = np.zeros([len(rc),len(thetac),len(phic)])
rho_disk = np.zeros([len(rc),len(thetac),len(phic)])
rho = np.zeros([len(rc),len(thetac),len(phic)])
if dyn_cav == True:
            print 'WARNING: Calculation of interdependent cavity properties has not been implemented in the infall-only solution!'
# Normalization for the total disk mass
def f(w,z,beta,rstar,h100):
f = 2*PI*w*(1-np.sqrt(rstar/w))*(rstar/w)**(beta+1)*np.exp(-0.5*(z/(w**beta*h100/100**beta))**2)
return f
rho_0 = M_disk/(nquad(f,[[R_disk_min,R_disk_max],[-R_env_max,R_env_max]], args=(beta,rstar,h100)))[0]
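        # rho_0 normalizes the flared-disk profile so that integrating f(w, z) over
        # R_disk_min..R_disk_max in radius and -R_env_max..R_env_max in height
        # recovers the requested total disk mass M_disk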
i = 0
j = 0
        if 'rho_cav_center' not in locals():
rho_cav_center = 5.27e-18 # 1.6e-17 # 5.27e-18
print 'Use 5.27e-18 as the default value for cavity center'
        if 'rho_cav_edge' not in locals():
rho_cav_edge = 40*AU
print 'Use 40 AU as the default value for size of the inner region'
discont = 1
for ir in range(0,len(rc)):
for itheta in range(0,len(thetac)):
for iphi in range(0,len(phic)):
if rc[ir] > R_env_min:
# Envelope profile
w = abs(rc[ir]*np.cos(np.pi/2 - thetac[itheta]))
z = rc[ir]*np.sin(np.pi/2 - thetac[itheta])
if ellipsoid == False:
z_cav = c0*abs(w)**1.5
if z_cav == 0:
z_cav = R_env_max
cav_con = abs(z) > abs(z_cav)
else:
# condition for the outer ellipsoid
cav_con = (2*(w/b_out)**2 + ((abs(z)-z_out)/a_out)**2) < 1
if cav_con:
# open cavity
if ellipsoid == False:
if rho_cav_edge == 0:
rho_cav_edge = R_env_min
if (rc[ir] <= rho_cav_edge) & (rc[ir] >= R_env_min):
rho_env[ir,itheta,iphi] = g2d * rho_cav_center#*((rc[ir]/AU)**2)
else:
rho_env[ir,itheta,iphi] = g2d * rho_cav_center*discont*(rho_cav_edge/rc[ir])**power
i += 1
else:
# condition for the inner ellipsoid
if (2*(w/b_in)**2 + ((abs(z)-z_in)/a_in)**2) > 1:
rho_env[ir,itheta,iphi] = rho_cav_out
else:
rho_env[ir,itheta,iphi] = rho_cav_in
i +=1
else:
j += 1
mu = abs(np.cos(thetac[itheta]))
# Implement new root finding algorithm
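                        # a sketch of the underlying relation (cf. Ulrich 1976): the cubic is
                        #   mu_o**3 + mu_o*(r/R_cen - 1) - mu*(r/R_cen) = 0,
                        # where mu_o = cos(theta_0) is the polar angle at which the streamline
                        # passing through (r, theta) left the cloud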
roots = np.roots(np.array([1.0, 0.0, rc[ir]/R_cen-1.0, -mu*rc[ir]/R_cen]))
if len(roots[roots.imag == 0]) == 1:
if (abs(roots[roots.imag == 0]) - 1.0) <= 0.0:
mu_o_dum = roots[roots.imag == 0]
else:
mu_o_dum = -0.5
print 'Problem with cubic solving, cos(theta) = ', mu_o_dum
print 'parameters are ', np.array([1.0, 0.0, rc[ir]/R_cen-1.0, -mu*rc[ir]/R_cen])
else:
mu_o_dum = -0.5
for imu in range(0, len(roots)):
if roots[imu]*mu >= 0.0:
if (abs((abs(roots[imu]) - 1.0)) <= 1e-5):
mu_o_dum = 1.0 * np.sign(mu)
else:
mu_o_dum = roots[imu]
if mu_o_dum == -0.5:
print 'Problem with cubic solving, roots are: ', roots
mu_o = mu_o_dum.real
rho_env[ir,itheta,iphi] = M_env_dot/(4*PI*(G*mstar*R_cen**3)**0.5)*(rc[ir]/R_cen)**(-3./2)*(1+mu/mu_o)**(-0.5)*(mu/mu_o+2*mu_o**2*R_cen/rc[ir])**(-1)
                        # Disk profile
                        if (w >= R_disk_min) and (w <= R_disk_max):
                            h = ((w/(100*AU))**beta)*h100
                            rho_disk[ir,itheta,iphi] = rho_0*(1-np.sqrt(rstar/w))*(rstar/w)**(beta+1)*np.exp(-0.5*(z/h)**2)
                        # Combine envelope and disk
                        rho[ir,itheta,iphi] = rho_disk[ir,itheta,iphi] + rho_env[ir,itheta,iphi]
                    else:
                        rho[ir,itheta,iphi] = 1e-30
                    # add the dust mass into the total count
                    cell_mass = rho[ir, itheta, iphi] * (1/3.)*(ri[ir+1]**3 - ri[ir]**3) * (phii[iphi+1]-phii[iphi]) * -(np.cos(thetai[itheta+1])-np.cos(thetai[itheta]))
                    total_mass = total_mass + cell_mass
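                    # The factor multiplying rho is the exact spherical-polar volume
                    # element, dV = (1/3)(r_{i+1}^3 - r_i^3) * dphi
                    # * (cos(theta_i) - cos(theta_{i+1})); the leading minus sign
                    # simply absorbs the fact that cos(theta) decreases with theta.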
        rho_env = rho_env + 1e-40
        rho_disk = rho_disk + 1e-40
        rho = rho + 1e-40
    # TSC model
    else:
        print 'Calculating the dust density profile with TSC solution...'
        if theta_cav != 0:
            # c0 = R_env_max**(-0.5)*np.sqrt(1/np.sin(np.radians(theta_cav))**3-1/np.sin(np.radians(theta_cav)))
            c0 = (1e4*AU)**(-0.5)*np.sqrt(1/np.sin(np.radians(theta_cav))**3-1/np.sin(np.radians(theta_cav)))
        else:
            c0 = 0
        # If needed, calculate the TSC model via IDL
        #
        if idl == True:
            print 'Using IDL to calculate the TSC model. Make sure you are running this on a machine with IDL.'
            import pidly
            # idl = pidly.IDL('/Applications/exelis/idl82/bin/idl')
            idl = pidly.IDL(IDL_path)
            idl('.r '+TSC_dir+'tsc.pro')
            # idl.pro('tsc_run', outdir=outdir, grid=[nxx,ny,nz], time=t, c_s=cs, omega=omega, rstar=rstar, renv_min=R_env_min, renv_max=R_env_max)
            # idl.pro('tsc_run', outdir=outdir, grid=[nxx,ny,nz], time=t, c_s=cs, omega=omega, rstar=rstar, renv_min=R_env_min, renv_max=min([R_inf,max(ri)])) # min([R_inf,max(ri)])
            #
            # only run the TSC calculation within the infall radius;
            # modify the rc array accordingly
            rc_idl = rc[(rc < min([R_inf,max(ri)]))]
            idl.pro('tsc_run', outdir=outdir, rc=rc_idl, thetac=thetac, time=t, c_s=cs, omega=omega, renv_min=R_env_min)  # , rstar=rstar, renv_min=R_env_min, renv_max=min([R_inf,max(ri)])
        else:
            print 'Read the pre-computed TSC model.'
            rc_idl = rc[(rc < min([R_inf,max(ri)]))]
        # read in the existing file
        rho_env_tsc_idl = np.genfromtxt(outdir+'rhoenv.dat').T
        # because only the region within the infall radius is calculated by the IDL
        # program, project it onto the original grid
        rho_env_tsc = np.zeros([len(rc), len(thetac)])
        for irc in range(len(rc)):
            if rc[irc] in rc_idl:
                rho_env_tsc[irc,:] = rho_env_tsc_idl[np.where(rc_idl == rc[irc]),:]
        # extrapolate the NaN values at the outer radii, usually beyond the infall radius,
        # using an r^-2 profile at radii greater than the infall radius,
        # and map the 2-D structure onto the 3-D grid
        def poly(x, y, x0, deg=2):
            import numpy as np
            p = np.polyfit(x, y, deg)
            y0 = 0
            for i in range(0, len(p)):
                y0 = y0 + p[i]*x0**(len(p)-i-1)
            return y0
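        # Example usage of poly() (a sketch; 'k' and 'r_out' are placeholders, not
        # variables defined in this script): extrapolate log10(rho) in log10(r) as
        #   rho_out = 10**poly(np.log10(rc_idl/AU),
        #                      np.log10(rho_env_tsc_idl[:, k]), np.log10(r_out/AU))
        # In the current version poly() is left unused, because the r^-2 power-law
        # mapping below is applied beyond the infall radius instead.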
        # rho_env_copy = np.array(rho_env_tsc)
        # if max(rc) > R_inf:
        #     ind_infall = np.where(rc <= R_inf)[0][-1]
        #     print ind_infall
        #     for ithetac in range(0, len(thetac)):
        #         # rho_dum = np.log10(rho_env_copy[(rc > R_inf) & (np.isnan(rho_env_copy[:,ithetac]) == False),ithetac])
        #         # rc_dum = np.log10(rc[(rc > R_inf) & (np.isnan(rho_env_copy[:,ithetac]) == False)])
        #         # rc_dum_nan = np.log10(rc[(rc > R_inf) & (np.isnan(rho_env_copy[:,ithetac]) == True)])
        #         # # print rc_dum
        #         # for i in range(0, len(rc_dum_nan)):
        #         #     rho_extrapol = poly(rc_dum, rho_dum, rc_dum_nan[i])
        #         #     rho_env_copy[(np.log10(rc) == rc_dum_nan[i]),ithetac] = 10**rho_extrapol
        #         #
        #         for i in range(ind_infall, len(rc)):
        #             rho_env_copy[i, ithetac] = 10**(np.log10(rho_env_copy[ind_infall, ithetac]) - 2*(np.log10(rc[i]/rc[ind_infall])))
        # rho_env2d = rho_env_copy
        # rho_env = np.empty((nx,ny,nz))
        # for i in range(0, nz):
        #     rho_env[:,:,i] = rho_env2d

        # map the TSC solution from IDL onto the actual 2-D grid
        rho_env_tsc2d = np.empty((nx,ny))
        if max(ri) > R_inf:
            ind_infall = np.where(rc <= R_inf)[0][-1]
            for i in range(0, len(rc)):
                if i <= ind_infall:
                    rho_env_tsc2d[i,:] = rho_env_tsc[i,:]
                else:
                    rho_env_tsc2d[i,:] = 10**(np.log10(rho_env_tsc[ind_infall,:]) - 2*(np.log10(rc[i]/rc[ind_infall])))
        else:
            rho_env_tsc2d = rho_env_tsc
        # map it onto the 3-D grid
        rho_env = np.empty((nx,ny,nz))
        for i in range(0, nz):
            rho_env[:,:,i] = rho_env_tsc2d
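        # Beyond the infall radius the mapped profile falls off as r^-2, so for any
        # theta index j (a sketch of a consistency check, not part of the model,
        # valid when ind_infall is defined, i.e. max(ri) > R_inf):
        #   np.log10(rho_env[ind_infall+1, j, 0]/rho_env[ind_infall, j, 0])
        # should be close to -2*np.log10(rc[ind_infall+1]/rc[ind_infall]),
        # matching a static, singular-isothermal-sphere-like outer envelope.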
        if dyn_cav == True:
            print 'Calculate the cavity properties using the criterion that swept-up mass = outflowed mass'
            # using swept-up mass = flow mass to derive the edge of the extended flat-density region
            v_outflow = 1e2 * 1e5
            rho_cav_edge = outflow_inner_edge(np.copy(rho_env), (ri,thetai,phii),M_env_dot,v_outflow,theta_cav, R_env_min)
            dict_params['rho_cav_edge'] = rho_cav_edge
            # assume gas-to-dust ratio = 100
            rho_cav_center = 0.01 * 0.1*M_env_dot*rho_cav_edge/v_outflow/2 / (2*np.pi/3*rho_cav_edge**3*(1-np.cos(np.radians(theta_cav))))
            dict_params['rho_cav_center'] = rho_cav_center
            print 'inner edge is %5f AU and density is %e g/cm3' % (rho_cav_edge/AU, rho_cav_center)

        # create the density arrays for the disk and the whole structure
        #
        rho_disk = np.zeros([len(rc),len(thetac),len(phic)])
        rho = np.zeros([len(rc),len(thetac),len(phic)])

        # Calculate the disk scale height via the normalization of h100
        def f(w,z,beta,rstar,h100):
            f = 2*PI*w*(1-np.sqrt(rstar/w))*(rstar/w)**(beta+1)*np.exp(-0.5*(z/(w**beta*h100/100**beta))**2)
            return f
        # The function for calculating the normalization of the disk using the total disk mass
        #
        rho_0 = M_disk/(nquad(f,[[R_disk_min,R_disk_max],[-R_env_max,R_env_max]], args=(beta,rstar,h100)))[0]
        i = 0
        j = 0
        if 'rho_cav_center' not in locals():
            rho_cav_center = 5.27e-18  # 1.6e-17 # 5.27e-18
            print 'Use 5.27e-18 as the default value for the cavity center'
        if 'rho_cav_edge' not in locals():
            rho_cav_edge = 40*AU
            print 'Use 40 AU as the default value for the size of the inner region'
        discont = 1
        for ir in range(0,len(rc)):
            for itheta in range(0,len(thetac)):
                for iphi in range(0,len(phic)):
                    if rc[ir] > R_env_min:
                        # Envelope profile
                        w = abs(rc[ir]*np.cos(np.pi/2 - thetac[itheta]))
                        z = rc[ir]*np.sin(np.pi/2 - thetac[itheta])
                        if ellipsoid == False:
                            z_cav = c0*abs(w)**1.5
                            if z_cav == 0:
                                z_cav = R_env_max
                            cav_con = abs(z) > abs(z_cav)
                        else:
                            # condition for the outer ellipsoid
                            cav_con = (2*(w/b_out)**2 + ((abs(z)-z_out)/a_out)**2) < 1
                        if cav_con:
                            # open cavity
                            if ellipsoid == False:
                                if rho_cav_edge == 0:
                                    rho_cav_edge = R_env_min
                                if (rc[ir] <= rho_cav_edge) & (rc[ir] >= R_env_min):
                                    rho_env[ir,itheta,iphi] = g2d * rho_cav_center  # *((rc[ir]/AU)**2)
                                else:
                                    rho_env[ir,itheta,iphi] = g2d * rho_cav_center*discont*(rho_cav_edge/rc[ir])**power
                                i += 1
                            else:
                                # condition for the inner ellipsoid
                                if (2*(w/b_in)**2 + ((abs(z)-z_in)/a_in)**2) > 1:
                                    rho_env[ir,itheta,iphi] = rho_cav_out
                                else:
                                    rho_env[ir,itheta,iphi] = rho_cav_in
                                i += 1
                        # Disk profile
                        if (w >= R_disk_min) and (w <= R_disk_max):
                            h = ((w/(100*AU))**beta)*h100
                            rho_disk[ir,itheta,iphi] = rho_0*(1-np.sqrt(rstar/w))*(rstar/w)**(beta+1)*np.exp(-0.5*(z/h)**2)
                        # Combine envelope and disk
                        rho[ir,itheta,iphi] = rho_disk[ir,itheta,iphi] + rho_env[ir,itheta,iphi]
                    else:
                        rho[ir,itheta,iphi] = 1e-40
                    # add the dust mass into the total count
                    cell_mass = rho[ir, itheta, iphi] * (1/3.)*(ri[ir+1]**3 - ri[ir]**3) * (phii[iphi+1]-phii[iphi]) * -(np.cos(thetai[itheta+1])-np.cos(thetai[itheta]))
                    total_mass = total_mass + cell_mass

        # rho_env = rho_env + 1e-40
        # rho_disk = rho_disk + 1e-40
        # rho = rho + 1e-40
    # apply gas-to-dust ratio of 100
    rho_dust = rho/g2d
    total_mass_dust = total_mass/MS/g2d
    print 'Total dust mass = %f Solar mass' % total_mass_dust

    if record == True:
        # Record the input and calculated parameters
        params = dict_params.copy()
        params.update({'d_sub': d_sub/AU, 'M_env_dot': M_env_dot/MS*yr, 'R_inf': R_inf/AU, 'R_cen': R_cen/AU, 'mstar': mstar/MS, 'M_tot_gas': total_mass/MS})
        record_hyperion(params,record_dir)

    if plot == True:
        # matplotlib rc settings
        # mat.rcParams['text.usetex'] = True
        # mat.rcParams['font.family'] = 'serif'
        # mat.rcParams['font.serif'] = 'Times'
        # mat.rcParams['font.sans-serif'] = 'Computer Modern Sans serif'

        # Plot the azimuthally averaged density
        fig = plt.figure(figsize=(8,6))
        ax_env = fig.add_subplot(111,projection='polar')
        # take the weighted average
        # rho2d is the 2-D projection of the gas density
        rho2d = np.sum(rho**2,axis=2)/np.sum(rho,axis=2)

        zmin = 1e-22/mmw/mh
        cmap = plt.cm.CMRmap
        rho2d_exp = np.hstack((rho2d,rho2d,rho2d[:,0:1]))
        thetac_exp = np.hstack((thetac-PI/2, thetac+PI/2, thetac[0]-PI/2))
        # plot the gas density
        img_env = ax_env.pcolormesh(thetac_exp,rc/AU,rho2d_exp/mmw/mh,cmap=cmap,norm=LogNorm(vmin=zmin,vmax=1e9)) # np.nanmax(rho2d_exp/mmw/mh)

        ax_env.set_xlabel(r'$\rm{Polar\,angle\,(Degree)}$',fontsize=20)
        ax_env.set_ylabel(r'$\rm{Radius\,(AU)}$',fontsize=20)
        ax_env.tick_params(labelsize=20)
        ax_env.set_yticks(np.arange(0,R_env_max/AU,R_env_max/AU/5))
        # ax_env.set_ylim([0,10000])
        ax_env.set_xticklabels([r'$\rm{90^{\circ}}$',r'$\rm{45^{\circ}}$',r'$\rm{0^{\circ}}$',r'$\rm{-45^{\circ}}$',\
                                r'$\rm{-90^{\circ}}$',r'$\rm{-135^{\circ}}$',r'$\rm{180^{\circ}}$',r'$\rm{135^{\circ}}$'])
        # fix the tick label font
        ticks_font = mpl.font_manager.FontProperties(family='STIXGeneral',size=20)
        for label in ax_env.get_yticklabels():
            label.set_fontproperties(ticks_font)
        ax_env.grid(True)
        cb = fig.colorbar(img_env, pad=0.1)
        cb.ax.set_ylabel(r'$\rm{Averaged\,Gas\,Density\,(cm^{-3})}$',fontsize=20)
        cb.set_ticks([1e2,1e3,1e4,1e5,1e6,1e7,1e8,1e9])
        cb.set_ticklabels([r'$\rm{10^{2}}$',r'$\rm{10^{3}}$',r'$\rm{10^{4}}$',r'$\rm{10^{5}}$',r'$\rm{10^{6}}$',\
                           r'$\rm{10^{7}}$',r'$\rm{10^{8}}$',r'$\rm{\geq 10^{9}}$'])
        cb_obj = plt.getp(cb.ax.axes, 'yticklabels')
        plt.setp(cb_obj,fontsize=20)
        fig.savefig(outdir+outname+'_gas_density.png', format='png', dpi=300, bbox_inches='tight')
        fig.clf()

        # Plot the radial density profile
        fig = plt.figure(figsize=(12,9))
        ax = fig.add_subplot(111)

        plot_grid = [0,49,99,149,199]
        alpha = np.linspace(0.3,1.0,len(plot_grid))
        for i in plot_grid:
            rho_rad, = ax.plot(np.log10(rc/AU), np.log10(rho2d[:,i]/g2d/mmw/mh),'-',color='b',linewidth=2, markersize=3,alpha=alpha[plot_grid.index(i)])
            tsc_only, = ax.plot(np.log10(rc/AU), np.log10(rho_env_tsc2d[:,i]/mmw/mh),'o',color='r',linewidth=2, markersize=3,alpha=alpha[plot_grid.index(i)])
        rinf = ax.axvline(np.log10(R_inf/AU), linestyle='--', color='k', linewidth=1.5)
        cen_r = ax.axvline(np.log10(R_cen/AU), linestyle=':', color='k', linewidth=1.5)
        # sisslope, = ax.plot(np.log10(rc/AU), -2*np.log10(rc/AU)+A-(-2)*np.log10(plot_r_inf), linestyle='--', color='Orange', linewidth=1.5)
        # gt_R_cen_slope, = ax.plot(np.log10(rc/AU), -1.5*np.log10(rc/AU)+B-(-1.5)*np.log10(plot_r_inf), linestyle='--', color='Orange', linewidth=1.5)
        # lt_R_cen_slope, = ax.plot(np.log10(rc/AU), -0.5*np.log10(rc/AU)+A-(-0.5)*np.log10(plot_r_inf), linestyle='--', color='Orange', linewidth=1.5)
        lg = plt.legend([rho_rad, tsc_only, rinf, cen_r],\
                        [r'$\rm{\rho_{dust}}$',r'$\rm{\rho_{tsc}}$',r'$\rm{infall\,radius}$',r'$\rm{centrifugal\,radius}$'],\
                        fontsize=20, numpoints=1)
        ax.set_xlabel(r'$\rm{log(Radius)\,(AU)}$',fontsize=20)
        ax.set_ylabel(r'$\rm{log(Gas \slash Dust\,Density)\,(cm^{-3})}$',fontsize=20)
        [ax.spines[axis].set_linewidth(1.5) for axis in ['top','bottom','left','right']]
        ax.minorticks_on()
        ax.tick_params('both',labelsize=18,width=1.5,which='major',pad=15,length=5)
        ax.tick_params('both',labelsize=18,width=1.5,which='minor',pad=15,length=2.5)
        # fix the tick label font
        ticks_font = mpl.font_manager.FontProperties(family='STIXGeneral',size=18)
        for label in ax.get_xticklabels():
            label.set_fontproperties(ticks_font)
        for label in ax.get_yticklabels():
            label.set_fontproperties(ticks_font)

        ax.set_ylim([0,15])
        fig.gca().set_xlim(left=np.log10(0.05))
        # ax.set_xlim([np.log10(0.8),np.log10(10000)])

        # subplot showing the radial density profile along the midplane
        ax_mid = plt.axes([0.2,0.2,0.2,0.2], frameon=True)
        ax_mid.plot(np.log10(rc/AU), np.log10(rho2d[:,199]/g2d/mmw/mh),'o',color='b',linewidth=1, markersize=2)
        ax_mid.plot(np.log10(rc/AU), np.log10(rho_env_tsc2d[:,199]/mmw/mh),'-',color='r',linewidth=1, markersize=2)
        # ax_mid.set_ylim([0,10])
        # ax_mid.set_xlim([np.log10(0.8),np.log10(10000)])
        ax_mid.set_ylim([0,15])

        fig.savefig(outdir+outname+'_gas_radial.pdf',format='pdf',dpi=300,bbox_inches='tight')
        fig.clf()

    # Insert the calculated grid and dust density profile into hyperion
    m.set_spherical_polar_grid(ri, thetai, phii)
    # temporary block for comparing the full TSC and infall-only TSC models
    # import sys
    # sys.path.append(os.path.expanduser('~')+'/programs/misc/')
    # from tsc_comparison import tsc_com
    # rho_tsc, rho_ulrich = tsc_com()
    m.add_density_grid(rho_dust.T, d)
    # m.add_density_grid(rho.T, outdir+'oh5.hdf5') # numpy reads the array in reverse order

    # Define the luminosity source
    source = m.add_spherical_source()
    source.luminosity = (4*PI*rstar**2)*sigma*(tstar**4) # [ergs/s]
    source.radius = rstar # [cm]
    source.temperature = tstar # [K]
    source.position = (0., 0., 0.)
    print 'L_center = % 5.2f L_sun' % ((4*PI*rstar**2)*sigma*(tstar**4)/LS)

    # # add an infrared source at the center
    # L_IR = 0.04
    # ir_source = m.add_spherical_source()
    # ir_source.luminosity = L_IR*LS
    # ir_source.radius = rstar # [cm]
    # ir_source.temperature = 500 # [K] peak at 10 um
    # ir_source.position = (0., 0., 0.)
    # print 'Additional IR source, L_IR = %5.2f L_sun' % L_IR

    # Set up the wavelength grid for monochromatic radiative transfer
    lambda0 = 0.1
    lambda1 = 2.0
    lambda2 = 50.0
    lambda3 = 95.0
    lambda4 = 200.0
    lambda5 = 314.0
    lambda6 = 1000.0
    n01 = 10.0
    n12 = 20.0
    n23 = 50.0

    lam01 = lambda0 * (lambda1/lambda0)**(np.arange(n01)/n01)
    lam12 = lambda1 * (lambda2/lambda1)**(np.arange(n12)/n12)
    lam23 = lambda2 * (lambda6/lambda2)**(np.arange(n23+1)/n23)

    lam = np.concatenate([lam01,lam12,lam23])
    nlam = len(lam)
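    # Each block above is logarithmically spaced, with a constant ratio between
    # neighboring points; a quick check (a sketch, not part of the setup):
    #   np.allclose(np.diff(np.log10(lam01)), np.log10(lambda1/lambda0)/n01)
    # should hold for the first block, and similarly for the other blocks.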
    # Create camera wavelength points
    n12 = 70.0
    n23 = 70.0
    n34 = 70.0
    n45 = 50.0
    n56 = 50.0

    lam12 = lambda1 * (lambda2/lambda1)**(np.arange(n12)/n12)
    lam23 = lambda2 * (lambda3/lambda2)**(np.arange(n23)/n23)
    lam34 = lambda3 * (lambda4/lambda3)**(np.arange(n34)/n34)
    lam45 = lambda4 * (lambda5/lambda4)**(np.arange(n45)/n45)
    lam56 = lambda5 * (lambda6/lambda5)**(np.arange(n56+1)/n56)

    lam_cam = np.concatenate([lam12,lam23,lam34,lam45,lam56])
    n_lam_cam = len(lam_cam)

    # Radiative transfer settings
    # number of photons for the temperature calculation and for imaging
    lam_list = lam.tolist()
    # print lam_list
    m.set_raytracing(True)
    # option to use more photons for imaging
    if better_im == False:
        im_photon = 1e6
    else:
        im_photon = 5e7

    if mono == True:
        # Monochromatic radiative transfer settings
        m.set_monochromatic(True, wavelengths=lam_list)
        m.set_n_photons(initial=1000000, imaging_sources=im_photon, imaging_dust=im_photon, raytracing_sources=1000000, raytracing_dust=1000000)
    else:
        # regular wavelength grid settings
        m.set_n_photons(initial=1000000, imaging=im_photon, raytracing_sources=1000000, raytracing_dust=1000000)
    # number of iterations used to compute the dust specific energy (temperature)
    m.set_n_initial_iterations(20)
    # m.set_convergence(True, percentile=95., absolute=1.5, relative=1.02)
    m.set_convergence(True, percentile=dict_params['percentile'], absolute=dict_params['absolute'], relative=dict_params['relative'])
    m.set_mrw(True) # Gamma = 1 by default
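    # MRW = modified random walk: in very optically thick cells photons take
    # aggregated diffusion steps instead of individual absorption/re-emission
    # events, which keeps the dense inner regions of this model tractable.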
    # m.set_forced_first_scattering(forced_first_scattering=True)

    # Set up images and SEDs
    # SED settings
    # Infinite aperture
    syn_inf = m.add_peeled_images(image=False)
    # use the index of the wavelength array used by the monochromatic radiative transfer
    if mono == False:
        syn_inf.set_wavelength_range(1400, 2.0, 1400.0)
    syn_inf.set_viewing_angles([dict_params['view_angle']], [0.0])
    syn_inf.set_uncertainties(True)
    syn_inf.set_output_bytes(8)

    # aperture
    # 7.2 arcsec at 10 um, scaled by lambda/10,
    # flattened beyond 20 um
    # default aperture
    if aperture == None:
        aperture = {'wave': [3.6, 4.5, 5.8, 8.0, 8.5, 9, 9.7, 10, 10.5, 11, 16, 20, 24, 35, 70, 100, 160, 250, 350, 500, 1300],\
                    'aperture': [7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 20.4, 20.4, 20.4, 20.4, 24.5, 24.5, 24.5, 24.5, 24.5, 24.5, 101]}
    # assign wl_aper and aper from the aperture dictionary
    wl_aper = aperture['wave']
    aper = aperture['aperture']
    # create the non-repetitive aperture list and index array
    aper_reduced = list(set(aper))
    index_reduced = np.arange(1, len(aper_reduced)+1)
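    # e.g. aper = [7.2, 7.2, 20.4] would give aper_reduced = [7.2, 20.4] (the order
    # from set() is arbitrary) and index_reduced = [1, 2]; one peeled-image group
    # is then set up per unique aperture below, rather than one per wavelength.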
    # name = np.arange(1,len(wl_aper)+1)
    # aper = np.empty_like(wl_aper)
    # for i in range(0, len(wl_aper)):
    #     if wl_aper[i] < 5:
    #         # aper[i] = 1.2 * 7
    #         aper[i] = 1.8 * 4
    #     elif (wl_aper[i] < 14) & (wl_aper[i] >= 5):
    #         # aper[i] = 7.2 * wl_aper[i]/10.
    #         aper[i] = 1.8 * 4
    #     elif (wl_aper[i] >= 14) & (wl_aper[i] < 40):
    #         # aper[i] = 7.2 * 2
    #         aper[i] = 5.1 * 4
    #     else:
    #         aper[i] = 24.5
    # dict_peel_sed = {}
    # for i in range(0, len(wl_aper)):
    #     aper_dum = aper[i]/2 * (1/3600.*np.pi/180.)*dstar*pc
    #     dict_peel_sed[str(name[i])] = m.add_peeled_images(image=False)
    #     # use the index of the wavelength array used by the monochromatic radiative transfer
    #     if mono == False:
    #         # dict_peel_sed[str(name[i])].set_wavelength_range(1300, 2.0, 1300.0)
    #         dict_peel_sed[str(name[i])].set_wavelength_range(1000, 2.0, 1000.0)
    #     dict_peel_sed[str(name[i])].set_viewing_angles([dict_params['view_angle']], [0.0])
    #     # aperture should be given in cm
    #     dict_peel_sed[str(name[i])].set_aperture_range(1, aper_dum, aper_dum)
    #     dict_peel_sed[str(name[i])].set_uncertainties(True)
    #     dict_peel_sed[str(name[i])].set_output_bytes(8)

    dict_peel_sed = {}
    for i in range(0, len(aper_reduced)):
        aper_dum = aper_reduced[i]/2 * (1/3600.*np.pi/180.)*dstar*pc
        dict_peel_sed[str(index_reduced[i])] = m.add_peeled_images(image=False)
        # use the index of the wavelength array used by the monochromatic radiative transfer
        if mono == False:
            dict_peel_sed[str(index_reduced[i])].set_wavelength_range(1400, 2.0, 1400.0)
        dict_peel_sed[str(index_reduced[i])].set_viewing_angles([dict_params['view_angle']], [0.0])
        # the aperture should be given in cm, and it is the radius of the aperture
        dict_peel_sed[str(index_reduced[i])].set_aperture_range(1, aper_dum, aper_dum)
        dict_peel_sed[str(index_reduced[i])].set_uncertainties(True)
        dict_peel_sed[str(index_reduced[i])].set_output_bytes(8)

    # image settings
    syn_im = m.add_peeled_images(sed=False)
    # use the index of the wavelength array used by the monochromatic radiative transfer
    if mono == False:
        syn_im.set_wavelength_range(1400, 2.0, 1400.0)
    # pixel number
    syn_im.set_image_size(300, 300)
    syn_im.set_image_limits(-R_env_max, R_env_max, -R_env_max, R_env_max)
    syn_im.set_viewing_angles([dict_params['view_angle']], [0.0])
    syn_im.set_uncertainties(True)
    # output as 64-bit
    syn_im.set_output_bytes(8)

    # Output settings
    # Density
    m.conf.output.output_density = 'last'
    # Density difference (shows where dust was destroyed)
    m.conf.output.output_density_diff = 'none'
    # Energy absorbed (using pathlengths)
    m.conf.output.output_specific_energy = 'last'
    # Number of unique photons that passed through the cell
    m.conf.output.output_n_photons = 'last'
    m.write(outdir+outname+'.rtin')

    if radmc == True:
        # RADMC-3D still uses a pre-defined aperture set, built with a simple for-loop
        aper = np.zeros([len(lam)])
        ind = 0
        for wl in lam:
            if wl < 5:
                aper[ind] = 8.4
            elif wl >= 5 and wl < 14:
                aper[ind] = 1.8 * 4
            elif wl >= 14 and wl < 40:
                aper[ind] = 5.1 * 4
            else:
                aper[ind] = 24.5
            ind += 1

        # Write the wavelength_micron.inp file
        #
        f_wave = open(outdir+'wavelength_micron.inp','w')
        f_wave.write('%d \n' % int(nlam))
        for ilam in range(0,nlam):
            f_wave.write('%f \n' % lam[ilam])
        f_wave.close()

        # Write the camera_wavelength_micron.inp file
        #
        f_wave_cam = open(outdir+'camera_wavelength_micron.inp','w')
        f_wave_cam.write('%d \n' % int(nlam))
        for ilam in range(0,nlam):
            f_wave_cam.write('%f \n' % lam[ilam])
        f_wave_cam.close()

        # Write the aperture_info.inp file
        #
        f_aper = open(outdir+'aperture_info.inp','w')
        f_aper.write('1 \n')
        f_aper.write('%d \n' % int(nlam))
        for iaper in range(0, len(aper)):
            f_aper.write('%f \t %f \n' % (lam[iaper],aper[iaper]/2))
        f_aper.close()

        # Write the stars.inp file
        #
        f_star = open(outdir+'stars.inp','w')
        f_star.write('2\n')
        f_star.write('1 \t %d \n' % int(nlam))
        f_star.write('\n')
        f_star.write('%e \t %e \t %e \t %e \t %e \n' % (rstar*0.9999,mstar,0,0,0))
        f_star.write('\n')
        for ilam in range(0,nlam):
            f_star.write('%f \n' % lam[ilam])
        f_star.write('\n')
        f_star.write('%f \n' % -tstar)
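        # A negative value in the spectrum slot of stars.inp tells RADMC-3D to use
        # a blackbody spectrum at temperature |T| instead of a tabulated stellar
        # spectrum.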
        f_star.close()

        # Write the grid file
        #
        f_grid = open(outdir+'amr_grid.inp','w')
        f_grid.write('1\n')  # iformat
        f_grid.write('0\n')  # AMR grid style (0 = regular grid, no AMR)
        f_grid.write('150\n')  # coordinate system: coordsystem<100 Cartesian; 100<=coordsystem<200 spherical; 200<=coordsystem<300 cylindrical
        f_grid.write('0\n')  # gridinfo
        f_grid.write('1 \t 1 \t 1 \n')  # include the x, y, z coordinates
        f_grid.write('%d \t %d \t %d \n' % (int(nx)-1,int(ny),int(nz)))  # size of the grid
        [f_grid.write('%e \n' % ri[ir]) for ir in range(1,len(ri))]
        [f_grid.write('%f \n' % thetai[itheta]) for itheta in range(0,len(thetai))]
        [f_grid.write('%f \n' % phii[iphi]) for iphi in range(0,len(phii))]
        f_grid.close()

        # Write the density file
        #
        f_dust = open(outdir+'dust_density.inp','w')
        f_dust.write('1 \n')  # format number
        f_dust.write('%d \n' % int((nx-1)*ny*nz))  # number of cells
        f_dust.write('1 \n')  # number of dust species
        for iphi in range(0,len(phic)):
            for itheta in range(0,len(thetac)):
                for ir in range(1,len(rc)):
                    f_dust.write('%e \n' % rho_dust[ir,itheta,iphi])
        f_dust.close()

        # Write the dust opacity table
        f_dustkappa = open(outdir+'dustkappa_oh5_extended.inp','w')
        f_dustkappa.write('3 \n')  # format index for including the g-factor
        f_dustkappa.write('%d \n' % len(dust['nu']))  # number of wavelengths/frequencies in the table
        for i in range(len(dust['nu'])):
            f_dustkappa.write('%f \t %f \t %f \t %f \n' % (c/dust['nu'][i]*1e4, dust['chi'][i], dust['chi'][i]*dust['albedo'][i]/(1-dust['albedo'][i]), dust['g'][i]))
        f_dustkappa.close()

        # Write the dust opacity control file
        #
        f_opac = open(outdir+'dustopac.inp','w')
        f_opac.write('2               Format number of this file\n')
        f_opac.write('1               Nr of dust species\n')
        f_opac.write('============================================================================\n')
        f_opac.write('1               Way in which this dust species is read\n')
        f_opac.write('0               0=Thermal grain\n')
        # f_opac.write('klaus           Extension of name of dustkappa_***.inp file\n')
        f_opac.write('oh5_extended    Extension of name of dustkappa_***.inp file\n')
        f_opac.write('----------------------------------------------------------------------------\n')
        f_opac.close()
        # Write the radmc3d.inp control file
        #
        f_control = open(outdir+'radmc3d.inp','w')
        f_control.write('nphot = %d \n' % 100000)
        f_control.write('scattering_mode_max = 2\n')
        f_control.write('camera_min_drr = 0.1\n')
        f_control.write('camera_min_dangle = 0.1\n')
        f_control.write('camera_spher_cavity_relres = 0.1\n')
        f_control.write('istar_sphere = 1\n')
        f_control.write('modified_random_walk = 1\n')
        f_control.close()

    return m

# from input_reader import input_reader_table
# from pprint import pprint
# filename = '/Users/yaolun/programs/misc/hyperion/test_input.txt'
# params = input_reader_table(filename)
# pprint(params[0])
# indir = '/Users/yaolun/test/'
# outdir = '/Users/yaolun/test/'
# dust_file = '/Users/yaolun/programs/misc/oh5_hyperion.txt'
# # dust_file = '/Users/yaolun/Copy/dust_model/Ormel2011/hyperion/(ic-sil,gra)3opc.txt'
# # fix_params = {'R_min': 0.14}
# fix_params = {}
# setup_model(indir,outdir,'model_test',params[0],dust_file,plot=True,record=False,\
#             idl=False,radmc=False,fix_params=fix_params,ellipsoid=False) | [
"[email protected]"
] | |
2382fa20e7fc0ba8a0cc593ed64f1cbe10471611 | 737c11da973590b7ae70845128caa7ca2c03be43 | /acorn/test/AcornTest/AcornUtil.py | cf04be8f05831ac48fb523f7a7565bc580cb0ff3 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | hobinyoon/apache-cassandra-3.0.5-src | c04a0309eb52a8517c74a3526680ba9d68592fd1 | fc6710f9ce117e22286b9f42955b3e7632844160 | refs/heads/master | 2020-12-21T00:02:50.636174 | 2017-05-19T14:10:27 | 2017-05-19T14:10:27 | 57,812,287 | 0 | 3 | null | 2017-05-19T14:10:28 | 2016-05-01T20:41:06 | Java | UTF-8 | Python | false | false | 1,810 | py | import os
import pprint
import sys
sys.path.insert(0, "/home/ubuntu/work/acorn-tools/util/python")
import Cons
import Util
sys.path.insert(0, "/home/ubuntu/work/acorn-tools/ec2")
import DescInst
def GenHostfiles():
    dn = "%s/.run" % os.path.dirname(os.path.realpath(__file__))
    fn_pssh_hn = "%s/pssh-hostnames" % dn
    fn_dc_ip_map = "%s/dc-ip-map" % dn

    # Generate all files if any of them doesn't exist
    if os.path.isfile(fn_pssh_hn) and os.path.isfile(fn_dc_ip_map):
        return

    with Cons.MeasureTime("Generating host files ..."):
        sys.stdout.write(" ")
        inst_descriptions = DescInst.GetInstDescs("acorn-server")
        #Cons.P(pprint.pformat(inst_descriptions, indent=2, width=100))

        # Take only running instances. There can be other instances, like "terminated" ones.
        inst_descriptions = [a for a in inst_descriptions if a["State"]["Name"] == "running"]

        Util.RunSubp("mkdir -p %s" % dn)
        with open(fn_pssh_hn, "w") as fo:
            for inst_desc in inst_descriptions:
                fo.write("%s\n" % inst_desc["PublicIpAddress"])
        Cons.P("Created %s %d" % (fn_pssh_hn, os.path.getsize(fn_pssh_hn)))

        with open(fn_dc_ip_map, "w") as fo:
            for inst_desc in inst_descriptions:
                az = inst_desc["Placement"]["AvailabilityZone"]
                dc = az[:-1]
                ip = inst_desc["PublicIpAddress"]
                fo.write("%s %s\n" % (dc, ip))
        Cons.P("Created %s %d" % (fn_dc_ip_map, os.path.getsize(fn_dc_ip_map)))
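# Example usage (a sketch): AcornUtil.GenHostfiles() is called once before
# launching pssh jobs. It writes .run/pssh-hostnames (one public IP per line,
# suitable for pssh -h) and .run/dc-ip-map (lines like "us-east-1 54.85.0.1",
# where the trailing availability-zone letter has been stripped from the zone
# name; the IP shown here is a placeholder).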
#PRJ_ROOT=$HOME/work/pr/2n
#HOSTS_FILE=$PRJ_ROOT/conf/hosts
#PSSH_OUT_DIR=/tmp/pssh-out
#
#
#def MergeOutput():
# prefix=$1
# rm -f $prefix-all
# for f in $prefix/*
# do
# echo "["`basename $f`"]" >> $prefix-all
# cat $f >> $prefix-all
# echo "" >> $prefix-all
# done
#
#
#def CatOutput():
# merge_output $@
# prefix=$1
# cat $prefix-all
#
#function less_output {
# merge_output $@
# prefix=$1
# less -r $prefix-all
#}
| [
"[email protected]"
] | |
cd9ea9f6995583d04647f40306bbc383cf0ce446 | ff3c4368081cd83b4fc30315d4ef2228d4682406 | /pipeline/sam-calc-refcov-cmp.py | 8efada185432d49997791096dc7dffd4d6cd2ad2 | [
"BSD-3-Clause"
] | permissive | dib-lab/2014-streaming | 8489aaa8ab86b409865dd1cc82f6dd68397303e3 | 4873ebfb87a7a95efdb1fbd4607ffdf76e750bbb | refs/heads/master | 2021-01-24T21:26:41.208661 | 2016-10-25T18:08:05 | 2016-10-25T18:08:05 | 28,547,623 | 1 | 1 | null | 2016-10-25T18:08:05 | 2014-12-27T22:14:19 | Jupyter Notebook | UTF-8 | Python | false | false | 2,711 | py | #! /usr/bin/env python
import sys
import argparse
import screed
import math
def ignore_at(iter):
    for item in iter:
        if item.startswith('@'):
            continue
        yield item
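
# ignore_at() drops SAM header lines ('@HD', '@SQ', '@PG', ...) so that only
# alignment records reach the parsing below, e.g. (a sketch; 'aln.sam' is a
# placeholder):
#   for line in ignore_at(open('aln.sam')):
#       fields = line.split('\t')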
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('genome')
    parser.add_argument('samfile1')
    parser.add_argument('samfile2')
    args = parser.parse_args()

    genome_dict1 = {}
    genome_dict2 = {}
    for record in screed.open(args.genome):
        genome_dict1[record.name] = [0] * len(record.sequence)
        genome_dict2[record.name] = [0] * len(record.sequence)

    n = 0
    n_skipped = 0
    for samline in ignore_at(open(args.samfile1)):
        n += 1
        if n % 100000 == 0:
            print >>sys.stderr, '...1', n

        readname, flags, refname, refpos, _, _, _, _, _, seq = \
            samline.split('\t')[:10]
        if refname == '*' or refpos == '*':
            # (don't count these as skipped)
            continue

        refpos = int(refpos)

        try:
            ref = genome_dict1[refname]
        except KeyError:
            print >>sys.stderr, "unknown refname: %s; ignoring (read %s)" % (refname, readname)
            n_skipped += 1
            continue

        for i in range(refpos - 1, refpos + len(seq) - 1):
            if i < len(ref):
                ref[i] = 1

    n = 0
    for samline in ignore_at(open(args.samfile2)):
        n += 1
        if n % 100000 == 0:
            print >>sys.stderr, '...2', n

        readname, flags, refname, refpos, _, _, _, _, _, seq = \
            samline.split('\t')[:10]
        if refname == '*' or refpos == '*':
            # (don't count these as skipped)
            continue

        refpos = int(refpos)

        try:
            ref = genome_dict2[refname]
        except KeyError:
            print >>sys.stderr, "unknown refname: %s; ignoring (read %s)" % (refname, readname)
            n_skipped += 1
            continue

        for i in range(refpos - 1, refpos + len(seq) - 1):
            if i < len(ref):
                ref[i] = 1

    if n_skipped / float(n) > .01:
        raise Exception, "Error: too many reads ignored! %d of %d" % \
            (n_skipped, n)

    total = 0.
    cov1 = 0.
    cov2 = 0.
    for name in genome_dict1:
        total += len(genome_dict1[name])
        cov1 += sum(genome_dict1[name])
        cov2 += sum(genome_dict2[name])

    print args.samfile1, float(cov1) / float(total), cov1, total
    print args.samfile2, float(cov2) / float(total), cov2, total
    print 'lost: %f' % (1.0 - float(cov2) / float(cov1),)
    print 'lost: %d of %d' % (cov1 - cov2, total)


if __name__ == '__main__':
    main()
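# Example invocation (a sketch; the file names are placeholders):
#   python sam-calc-refcov-cmp.py genome.fa before.sam after.sam
# This prints the fraction of reference bases covered by reads in each SAM
# file, plus the coverage lost going from the first mapping to the second.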
| [
"[email protected]"
] |