Dataset schema (one record per stored source file; ⌀ marks nullable columns):

| column | type | values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 ⌀ | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] ⌀ | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] ⌀ | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |
14bc887b39a0ef1763ad3da22c7d9239cd101b13 | 3803b6364290e21061e8c0c97d3e9c0b204c50fc | /gae_main.py | 7533cd5125638887f7837401aa16cc45aa1c6a9d | [] | no_license | t0ster/django-gae-buildout-skeleton | 5a146b94d35ff466b9ee5e981c0ecdfb31eb28b5 | 71c3553b661fbd58937797d352f1a337c1641b0a | refs/heads/master | 2020-05-27T06:28:21.745403 | 2011-06-27T19:24:21 | 2011-06-27T19:24:21 | 1,962,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | import sys
from os.path import dirname, abspath, join
PROJECT_DIR = join(abspath(dirname(__file__)), "testapp")
if PROJECT_DIR not in sys.path or sys.path.index(PROJECT_DIR) > 0:
while PROJECT_DIR in sys.path:
sys.path.remove(PROJECT_DIR)
sys.path.insert(0, PROJECT_DIR)
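# The remove-then-insert dance leaves exactly one copy of PROJECT_DIR at
# sys.path[0], so the bundled app shadows same-named modules elsewhere.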
import djangoappengine.main.main
djangoappengine.main.main.main()
| [
"[email protected]"
] | |
39b77ac51e19c5f33bf7b51871b0966c27a13121 | 141b42d9d72636c869ff2ce7a2a9f7b9b24f508b | /myvenv/Lib/site-packages/phonenumbers/data/region_SC.py | b0e98d3e74585ab661b7fde30f9021a9aaefbf4c | [
"BSD-3-Clause"
] | permissive | Fa67/saleor-shop | 105e1147e60396ddab6f006337436dcbf18e8fe1 | 76110349162c54c8bfcae61983bb59ba8fb0f778 | refs/heads/master | 2021-06-08T23:51:12.251457 | 2018-07-24T08:14:33 | 2018-07-24T08:14:33 | 168,561,915 | 1 | 0 | BSD-3-Clause | 2021-04-18T07:59:12 | 2019-01-31T17:00:39 | Python | UTF-8 | Python | false | false | 971 | py | """Auto-generated file, do not edit by hand. SC metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_SC = PhoneMetadata(id='SC', country_code=248, international_prefix='0(?:[02]|10?)',
general_desc=PhoneNumberDesc(national_number_pattern='[24689]\\d{5,6}', possible_length=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='4[2-46]\\d{5}', example_number='4217123', possible_length=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='2[5-8]\\d{5}', example_number='2510123', possible_length=(7,)),
toll_free=PhoneNumberDesc(national_number_pattern='8000\\d{3}', example_number='8000000', possible_length=(7,)),
voip=PhoneNumberDesc(national_number_pattern='(?:64\\d|971)\\d{4}', example_number='6412345', possible_length=(7,)),
preferred_international_prefix='00',
number_format=[NumberFormat(pattern='(\\d)(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['[246]'])])
| [
"[email protected]"
] | |
410c72545e00ff4b20fa2686e2cb0a81edbfd253 | 0cea2eef085a16792b0722b5ea1ccecf22ebf56a | /emu/tests/test_wps_wordcounter.py | 7957d44ea5c7acf1ec7c8b324c4d11a2df58fc2a | [
"Apache-2.0"
] | permissive | Ouranosinc/emu | 6931657412c2a3412e9548b2ad80a91c7362e79b | f3b92f44555b9e85f8c62e8e34a8a59d420a1c67 | refs/heads/master | 2021-05-04T02:00:59.517729 | 2016-11-18T17:48:31 | 2016-11-18T17:48:31 | 71,271,528 | 0 | 0 | Apache-2.0 | 2020-11-11T03:21:22 | 2016-10-18T17:03:47 | Python | UTF-8 | Python | false | false | 550 | py | import pytest
from pywps import Service
from emu.tests.common import client_for, assert_response_success
from emu.processes.wps_wordcounter import WordCounter
@pytest.mark.online
def test_wps_wordcount():
client = client_for(Service(processes=[WordCounter()]))
datainputs = "text={0}".format(
"https://en.wikipedia.org/wiki/Web_Processing_Service")
resp = client.get(
service='wps', request='execute', version='1.0.0',
identifier='wordcounter',
datainputs=datainputs)
assert_response_success(resp)
| [
"[email protected]"
] | |
ddc2256caa53e5da02d9ef82936e44811ede8002 | 71e50200ed8fec2bd567b060d52b6ab5c216dc08 | /app/auth/forms.py | 3a2250613203d75d661a4c5f7f01523c90374425 | [
"MIT"
] | permissive | ThiraTheNerd/the_blog | 5361d7b92be857e4576d3d96e64c176539ff7cba | 3edd51b2507726b4339f3b59b95133f9e2005700 | refs/heads/master | 2023-06-10T15:16:06.112694 | 2021-06-27T07:59:02 | 2021-06-27T07:59:02 | 379,469,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,ValidationError,BooleanField
from wtforms.validators import Required,Email,EqualTo
from ..models import User
class RegistrationForm(FlaskForm):
email = StringField('Your Email Address',validators=[Required(),Email()])
username = StringField('Enter your username',validators = [Required()])
password = PasswordField('Password',validators = [Required(), EqualTo('password_confirm',message = 'Passwords must match')])
password_confirm = PasswordField('Confirm Passwords',validators = [Required()])
submit = SubmitField('Sign Up')
def validate_email(self,data_field):
if User.query.filter_by(email =data_field.data).first():
raise ValidationError('There is an account with that email')
def validate_username(self,data_field):
if User.query.filter_by(username = data_field.data).first():
raise ValidationError('That username is taken')
class LoginForm(FlaskForm):
email = StringField('Your Email Address',validators=[Required(),Email()])
password = PasswordField('Password',validators =[Required()])
remember = BooleanField('Remember me')
submit = SubmitField('Sign In')
| [
"[email protected]"
] | |
264ea3af5cb07d50065bcd17d8510014c65e8225 | 65b4522c04c2be071c2d42095956fe950fe1cebe | /agu-paper/near_field_co_disp/verticals/plot_co_obs_pred.py | 1c81afdcebed862eb9e1599a073dafd28848637f | [] | no_license | geodesy/viscojapan | ac0cd93f7a2134cd2651623b94879dcc21c0c46a | 03e70265b56eb5994e73bcb6066f0be338e42f27 | refs/heads/master | 2021-03-03T18:19:07.779601 | 2015-07-16T03:50:49 | 2015-07-16T03:50:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,052 | py | from os.path import join
import numpy as np
import tempfile
import pGMT
import viscojapan as vj
gmt = pGMT.GMT()
gmt.gmtset('ANNOT_FONT_SIZE_PRIMARY','9',
'LABEL_FONT_SIZE','9',
'BASEMAP_TYPE','PLAIN',
)
gplt = gmt.gplt
lon1 = 139
lon2 = 145
lat1 = 34.7
lat2 = 41
gplt.psbasemap(
R = '{lon1}/{lon2}/{lat1}/{lat2}'.format(lon1=lon1,
lon2=lon2,
lat1 = lat1,
lat2 = lat2
), # region
J = 'B{lon0}/{lat0}/{lat1}/{lat2}/14c'.format(
lon0=(lon1+lon2)/2.,
lat0 = (lat1+lat2)/2.,
lat1 = lat1,
lat2 = lat2), # projection
B = '2', U='20/0/22/Yang', K='', P=''
)
# plot coseismic slip
splt = vj.gmt.GMTSlipPlotter(
gplt = gplt,
slip_file_txt = '../share/co_slip'
)
splt.init(
# original_cpt_file = 'bath_112.cpt',
original_cpt_file = '../Blues_09.cpt',
#if_cpt_reverse = True
)
splt.plot_slip()
splt.plot_scale(
xpos = 12,
ypos = 5)
vj.gmt.plot_plate_boundary(gplt, color=100)
scale = 5
###########################
# onshore
# plot prediction
plt_vec = vj.gmt.VecFieldPlotter(gmt, 'share/co_ver_pred',scale)
plt_vec.plot_vectors(arrow_width='.2', head_length='.1', head_width='.1',
pen_width='1.2')
plt_vec.plot_vec_legend(
lon=142.5, lat=40.5,
leg_len = 0.2,
leg_txt = '20 cm pred.',
text_offset_lon = -0.4,
text_offset_lat = -0.15,
if_vertical = True
)
# plot observation
plt_vec = vj.gmt.VecFieldPlotter(gmt, 'share/co_ver_obs',scale,'red')
plt_vec.plot_vectors(arrow_width='.2', head_length='.1', head_width='.1',
pen_width='1.2')
plt_vec.plot_vec_legend(
lon=143.2, lat=40.5,
leg_len = .2,
leg_txt = '20 cm obs. ONSHORE',
text_offset_lon = -0.2,
text_offset_lat = -0.15,
if_vertical = True,
)
#######################3
# plot seafloor:
scale = 1.5
# plot prediction
plt_vec = vj.gmt.VecFieldPlotter(gmt, 'share/co_ver_pred_seafloor',scale)
plt_vec.plot_empty_vectors()
plt_vec.plot_vec_legend(
lon=143, lat=35.2,
leg_len = 1,
leg_txt = '1 m pred.',
text_offset_lon = -0.2,
text_offset_lat = -0.25,
if_vertical = True
)
# plot observation
plt_vec = vj.gmt.VecFieldPlotter(gmt, 'share/co_ver_obs_seafloor',scale,'red')
plt_vec.plot_empty_vectors()
plt_vec.plot_vec_legend(
lon=143.7, lat=35.2,
leg_len = 1,
leg_txt = '1 m obs. SEAFLOOR',
text_offset_lon = -0.2,
text_offset_lat = -0.25,
if_vertical = True
)
gplt.pscoast(
R = '', J = '',
D = 'h', N = 'a/faint,50,--', A='500',
W = 'faint,100', L='f139.5/39.5/38/50+lkm+jt',
O = '', K='')
vj.gmt.plot_seafloor_stations(gplt, marker_size=0, network='SEAFLOOR_POST',
justification='MB', text_offset_Y=0.03,
fontsize='8')
gplt.finish()
gmt.save('seafloor_co_ver_obs_pred.pdf')
| [
"[email protected]"
] | |
b8543bcd94f5a24fda57f0ec6485022513811113 | e443674961b04476e96b0db3b7a963966bf72818 | /score/urls.py | d6fc782e280fdac71818a20f18de39f8174172f9 | [
"BSD-3-Clause"
] | permissive | jbbqqf/okapi | 14ded14219ba9ed9dc0acaea1c6b97a2b10afa73 | 3db29ef1e15685fae304190bd176f75c4e367d03 | refs/heads/master | 2022-11-28T15:08:37.357135 | 2016-01-04T16:01:25 | 2016-01-04T16:01:25 | 283,250,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # -*- coding: utf-8 -*-
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from score.views import stats, mystats, ScoreView, CurrentScoreView
router = DefaultRouter()
router.register(r'scores', ScoreView)
router.register(r'currentscores', CurrentScoreView)
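# DefaultRouter generates the list/detail route pair for each registered
# viewset (e.g. /scores/ and /scores/<pk>/) plus a browsable API root view.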
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^stats/$', stats),
url(r'^mystats/$', mystats),
]
| [
"[email protected]"
] | |
c301f72048c538c25d20741d719ccdcd362b3ffb | ad0857eaba945c75e705594a53c40dbdd40467fe | /baekjoon/python/pow_of_matrix_10830.py | 96dbae8eb57691de783032c11a2b4137b5b2df3e | [
"MIT"
] | permissive | yskang/AlgorithmPractice | c9964d463fbd0d61edce5ba8b45767785b0b5e17 | 3efa96710e97c8740d6fef69e4afe7a23bfca05f | refs/heads/master | 2023-05-25T13:51:11.165687 | 2023-05-19T07:42:56 | 2023-05-19T07:42:56 | 67,045,852 | 0 | 0 | null | 2021-06-20T02:42:27 | 2016-08-31T14:40:10 | Python | UTF-8 | Python | false | false | 1,347 | py | # Title: 행렬 제곱
# Link: https://www.acmicpc.net/problem/10830
import sys
import copy
sys.setrecursionlimit(10 ** 6)
read_list_int = lambda: list(map(int, sys.stdin.readline().strip().split(' ')))
def multiple_matrix(n: int, a: list, b: list):
res = []
for row in a:
t_row = []
for i in range(n):
s = 0
for x, v in enumerate(row):
s = (s + (v * (b[x][i] % 1000)) % 1000)%1000
t_row.append(s)
res.append(t_row)
return res
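# Square-and-multiply: the bits of b are consumed least-significant first,
# and `temp` holds A^(2^k) after k squarings, so e.g. b = 13 (0b1101)
# accumulates acc = A^1 * A^4 * A^8, with every entry kept modulo 1000.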
def solution(n: int, b: int, matrix: list):
bin_b = list('{0:b}'.format(b))
acc = [[1 if x==y else 0 for x in range(n)] for y in range(n)]
temp = copy.deepcopy(matrix)
if bin_b.pop() == '1':
acc = multiple_matrix(n, acc, matrix)
    while bin_b:
        temp = multiple_matrix(n, temp, temp)  # temp is now A^(2^k)
        if bin_b.pop() == '1':
            acc = multiple_matrix(n, acc, temp)
ans = []
for row in acc:
ans.append(' '.join(map(str, row)))
return '\n'.join(ans)
def main():
n, b = read_list_int()
matrix = []
for _ in range(n):
matrix.append(read_list_int())
print(solution(n, b, matrix))
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
777aadcb892990786aef2675249423db680d99b2 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_099/ch34_2019_08_28_17_31_34_673027.py | 1e0afb12ab5000e405d6a94403271238303134e8 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | dep = float(input('Qual o depósito inicial?'))
i = float(input('Qual a taxa de juros?'))
total=0
t=1
while i<=24:
mes = (dep*(1+i)**t)
total = total + mes
print ('{0}:.2f'.format(mes))
i=i+1
print ('{0}:.2f'.format(total)) | [
"[email protected]"
] | |
413432f0777905651ea105e4b5bb856331ca8748 | f09edd827db021e4a22dd9b479f69acff1fe5079 | /tensorflow/contrib/learn/python/learn/estimators/run_config.py | ab48e3beea822ec8dc958f37d8060bd9b9af64f5 | [
"Apache-2.0"
] | permissive | tocigm/itemRec-offline | 24c92cc882c2cfab081a3a3761257272e01b6b81 | dafe10721a7bdc39dad27ded8325801b83fbd7e6 | refs/heads/master | 2022-11-25T12:30:23.445457 | 2016-09-07T14:51:52 | 2016-09-07T14:51:52 | 67,612,191 | 0 | 1 | Apache-2.0 | 2022-11-16T08:22:25 | 2016-09-07T14:05:16 | C++ | UTF-8 | Python | false | false | 9,970 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run Config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from tensorflow.python import ConfigProto
from tensorflow.python import GPUOptions
from tensorflow.python.training.server_lib import ClusterSpec
class RunConfig(object):
"""This class specifies the specific configurations for the run.
If you're a Google-internal user using command line flags with learn_runner.py
(for instance, to do distributed training or to use parameter servers), you
probably want to use learn_runner.EstimatorConfig instead.
"""
# TODO(wicke): Move options out once functionality is covered by monitors
def __init__(self,
master=None,
task=None,
num_ps_replicas=None,
num_cores=4,
log_device_placement=False,
gpu_memory_fraction=1,
cluster_spec=None,
tf_random_seed=None,
save_summary_steps=100,
save_checkpoints_secs=600,
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000,
job_name=None,
is_chief=None):
"""Constructor.
If set to None, `master`, `task`, `num_ps_replicas`, `cluster_spec`,
`job_name`, and `is_chief` are set based on the TF_CONFIG environment
variable, if the pertinent information is present; otherwise, the defaults
listed in the Args section apply.
The TF_CONFIG environment variable is a JSON object with two relevant
attributes: `task` and `cluster_spec`. `cluster_spec` is a JSON serialized
version of the Python dict described in server_lib.py. `task` has two
attributes: `type` and `index`, where `type` can be any of the task types
in the cluster_spec. When TF_CONFIG contains said information, the
following properties are set on this class:
* `job_name` is set to [`task`][`type`]
* `task` is set to [`task`][`index`]
* `cluster_spec` is parsed from [`cluster`]
* 'master' is determined by looking up `job_name` and `task` in the
cluster_spec.
* `num_ps_replicas` is set by counting the number of nodes listed
in the `ps` job of `cluster_spec`.
* `is_chief`: true when `job_name` == "master" and `task` == 0.
Example:
```
cluster = {'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps({
{'cluster': cluster,
'task': {'type': 'worker', 'index': 1}}})
config = RunConfig()
assert config.master == 'host4:2222'
assert config.task == 1
assert config.num_ps_replicas == 2
assert config.cluster_spec == server_lib.ClusterSpec(cluster)
assert config.job_name == 'worker'
assert not config.is_chief
```
Args:
master: TensorFlow master. Defaults to empty string for local.
task: Task id of the replica running the training (default: 0).
num_ps_replicas: Number of parameter server tasks to use (default: 0).
num_cores: Number of cores to be used (default: 4).
log_device_placement: Log the op placement to devices (default: False).
gpu_memory_fraction: Fraction of GPU memory used by the process on
each GPU uniformly on the same machine.
cluster_spec: a `tf.train.ClusterSpec` object that describes the cluster
in the case of distributed computation. If missing, reasonable
assumptions are made for the addresses of jobs.
tf_random_seed: Random seed for TensorFlow initializers.
Setting this value allows consistency between reruns.
save_summary_steps: Save summaries every this many steps.
save_checkpoints_secs: Save checkpoints every this many seconds.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables
the feature.
job_name: the type of task, e.g., 'ps', 'worker', etc. The `job_name`
must exist in the `cluster_spec.jobs`.
is_chief: whether or not this task (as identified by the other parameters)
should be the chief task.
Raises:
ValueError: if num_ps_replicas and cluster_spec are set (cluster_spec
        may come from the TF_CONFIG environment variable).
"""
# If not explicitly specified in the constructor and the TF_CONFIG
# environment variable is present, load cluster_spec from TF_CONFIG.
config = json.loads(os.environ.get('TF_CONFIG') or '{}')
if not cluster_spec and 'cluster' in config:
cluster_spec = ClusterSpec(config['cluster'])
self.cluster_spec = cluster_spec
# Set job_name and task. If explicitly specified, use those values,
# otherwise, if the TF_CONFIG environment variable is present, use that.
# Otherwise, use the respective default (None / 0).
task_env = config.get('task', {})
self._job_name = job_name or task_env.get('type') or None
self.task = task if task is not None else task_env.get('index') or 0
self.master = (master or _get_master(self.cluster_spec, self.job_name,
self.task) or '')
if num_ps_replicas is not None and self.cluster_spec:
raise ValueError('Cannot specify both num_ps_replicas and cluster_spec. '
'Note: cluster_spec may have been set in the TF_CONFIG '
'environment variable.')
self.num_ps_replicas = num_ps_replicas or _count_ps(self.cluster_spec) or 0
# Set is_chief.
self._is_chief = is_chief
# When the TF_CONFIG environment variable is set, we can set the default
# of is_chief to 0 when job_name is "master" and task is 0.
if (self._is_chief is None) and config:
self._is_chief = (self._job_name == 'master' and self.task == 0)
# Enforce that is_chief is only applicable to workers or masters
# (Cloud ML) with task == 0.
if self._is_chief:
if self.task != 0:
raise ValueError(
'Task is %d, but only task 0 may be chief. Please check is_chief '
'and task, which may have been set in TF_CONFIG environment '
'variable.' % (self.task,))
if self._job_name not in (None, 'master', 'worker'):
raise ValueError(
'job_name is \'%s\', but only masters or workers may be chiefs. '
'Please check is_chief and job_name, which may have been set in '
'TF_CONFIG environment variable.' % (self._job_name,))
elif (self._is_chief is False and self._job_name == 'master' and
self.task == 0):
raise ValueError(
'Master task 0 must be chief. Please check is_chief, job_name, and '
'task, which may have been set in TF_CONFIG environment variable.')
gpu_options = GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction)
self.tf_config = ConfigProto(
log_device_placement=log_device_placement,
inter_op_parallelism_threads=num_cores,
intra_op_parallelism_threads=num_cores,
gpu_options=gpu_options)
self.tf_random_seed = tf_random_seed
self.save_summary_steps = save_summary_steps
self.save_checkpoints_secs = save_checkpoints_secs
self.keep_checkpoint_max = keep_checkpoint_max
self.keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
@property
def is_chief(self):
return self._is_chief
@property
def job_name(self):
return self._job_name
def _count_ps(cluster_spec):
"""Counts the number of parameter servers in cluster_spec."""
return len(cluster_spec.as_dict().get('ps', [])) if cluster_spec else 0
def _get_master(cluster_spec, job_name, task_index):
"""Returns the appropriate string for the TensorFlow master."""
if not cluster_spec:
return ''
# If there is only one node in the cluster, do things locally.
jobs = cluster_spec.jobs
if len(jobs) == 1 and len(cluster_spec.job_tasks(jobs[0])) == 1:
return ''
# Lookup the master in cluster_spec using job_name and task_index,
# if possible.
if job_name:
if job_name not in jobs:
raise ValueError(
'%s is not a valid task in the cluster_spec:\n'
'%s\n\n'
'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (job_name, cluster_spec))
addresses = cluster_spec.job_tasks(job_name)
if task_index >= len(addresses) or task_index < 0:
raise ValueError(
'%d is not a valid task index for task type %s in the '
'cluster_spec:\n'
'%s\n\n'
          'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (task_index, job_name, cluster_spec))
return addresses[task_index]
# For backwards compatibility, we return empty string if job_name was
# not set (job_name did not previously exist).
return ''
| [
"[email protected]"
] | |
f9ca04d56bc46f75f3cd88a867059f4016baeb1f | c60c199410289c1d7ec4aea00833b461e1f08f88 | /.history/older-than/older/source-example/day3/function/cpall.py | 4f328903303857e622385428d5a8aeb80f642d18 | [] | no_license | ver007/pythonjumpstart | 66fb111e6af197fad3e853b2c2d712a1b57a7d59 | 5b1f52479abd07456e2da494149e491d398f3b7d | refs/heads/master | 2021-01-21T01:34:35.501870 | 2015-05-13T14:10:13 | 2015-05-13T14:10:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | #!/usr/bin/env python
import sys
def copyall(*files):
target = files[-1]
with open(target, 'w') as fw:
for filename in files[:-1]:
with open(filename) as fp:
fw.write(fp.name.center(60, '-') + "\n")
for line in fp:
fw.write(line)
print "%s : file copied " % fp.name
fw.write('-'.center(60, '-')+"\n")
fw.write("\n")
def usage():
if len(sys.argv) < 3:
print "Usage : "
print "%s source [source ....] target" % sys.argv[0]
exit(1)
usage()
copyall(*sys.argv[1:])
| [
"[email protected]"
] | |
7eabfc4c5ae2a54b39ec1b5679ebc7261404c15a | 181af10fcf40b824fe92d3b8f72fd15d6d1490c2 | /Contests/101-200/week 195/1499. Max Value of Equation/Max Value of Equation.py | 4a8cd0f2c2a2c938b386ace14ced1673cfa96cb1 | [] | no_license | wangyendt/LeetCode | 402c59a0b7b7f5b3a672231ea5dad8056ade36af | 4a3ba15284c45b2d8bf38306c8c8526ae174615c | refs/heads/master | 2023-08-10T06:27:54.995152 | 2023-08-10T02:22:27 | 2023-08-10T02:22:27 | 176,651,399 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: Wayne
@contact: [email protected]
@software: PyCharm
@file: Max Value of Equation
@time: 2020/06/28 15:53
"""
import heapq
import sys
class Solution:
    def findMaxValueOfEquation(self, points: list, k: int) -> int:
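        # For points i < j (so xi <= xj and xj - xi <= k) the objective
        # yi + yj + |xi - xj| equals (xj + yj) + (yi - xi); each new point
        # only needs the largest (yi - xi), i.e. the smallest (xi - yi),
        # still within range, which the min-heap keyed on (px - py)
        # supplies in O(log n) per point.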
h = []
ret = -sys.maxsize
for px, py in points:
while h and px - h[0][1] > k:
heapq.heappop(h)
if h:
ret = max(ret, px + py - h[0][0])
heapq.heappush(h, (px - py, px))
return ret
| [
"[email protected]"
] | |
cc76df2f50876491c6194483bb5da74af6344ea2 | 7771130ea6eb1f076a7d18e672d3d82d5996e957 | /contrib/testgen/base58.py | 70fccf5b3202c12cb969d604d6ee3cb3e8055d00 | [
"MIT"
] | permissive | gdrcoin/gdrcoin | 49707508dfc1b14ace3817854416355a925539df | f9f2137b3d9069bfc8e3c69c90a684a061dfb6aa | refs/heads/master | 2020-03-10T18:01:49.563615 | 2018-04-14T12:36:52 | 2018-04-14T12:36:52 | 129,511,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,978 | py | # Copyright (c) 2012-2016 The Gdrcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Gdrcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Gdrcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
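# Round trip: b58encode_chk appends the 4-byte double-SHA256 checksum
# before encoding; b58decode_chk strips and verifies it, returning None
# when the checksum does not match.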
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
if result[-4:] == checksum(result[:-4]):
return result[:-4]
else:
return None
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21: return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/gdrcoin/python-base58.git)
assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
| [
"[email protected]"
] | |
ad2f495082c2cab1b514035b13c0660751bebe7f | d0e379bfe75597bb7f20bd7cd5d62ea9c59478a4 | /appengine/clients/models.py | dfeccfd8287c5e9ccfeaa17dc95eaa3fcdd2e64a | [] | no_license | slee124565/flh-homecenter-appeng | 3064224dc8e7b1ce2b2e381242f26da823b89c5c | 92b508c3d39d173e250d221019cd0914ff65e5cd | refs/heads/master | 2021-04-30T11:10:02.808188 | 2018-02-27T06:40:28 | 2018-02-27T06:40:28 | 121,348,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# from django.db import models
from firebasedb.models import FirebaseDB
import logging
logger = logging.getLogger(__name__)
def get_dev_db_obj(pi_serial):
firebase_db = FirebaseDB()
db_dev = firebase_db.get_dev_info(pi_serial)
return db_dev
class ClientDev(object):
_pi_serial = None
def __init__(self, pi_serial):
self._pi_serial = pi_serial
self._db_obj = get_dev_db_obj(self._pi_serial)
if self._db_obj is None:
raise ValueError('device ({obj._pi_serial} not exist in db)'.format(obj=self))
def __str__(self):
return '{obj.__class__.__name__}({obj._pi_serial})'.format(obj=self)
def get_dev_http_url(self):
http_tunnel = None
if self._db_obj.get('info',{}).get('tunnels',None):
dev_tunnels = self._db_obj.get('info',{}).get('tunnels',None)
for tunnel in dev_tunnels:
if tunnel.find('http://') >= 0:
http_tunnel = tunnel
logger.debug('{obj} get_dev_http_url: {tunnel}'.format(obj=self, tunnel=http_tunnel))
return http_tunnel
| [
"[email protected]"
] | |
6e7b9d64a9343c09209781e9882c730170067fb8 | 66fbb969c8844f6a0db7cf8939c3412516bf54ca | /binary_tree_maximum_path_sum.py | ac6419d3ffe9ca2ad49c162cc8be4b6fa92bc5c3 | [] | no_license | chunweiliu/leetcode2 | 67a86d5a0d8c3ffe41f53a46b0e5960edc64c56d | 086b7c9b3651a0e70c5794f6c264eb975cc90363 | refs/heads/master | 2021-01-19T08:13:12.667448 | 2017-04-08T06:02:35 | 2017-04-08T06:02:35 | 87,612,004 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def maxPathSum(self, root):
"""
Each path in this tree is either
1) starting from a root to a leaf, or
        2) crossing a root from one subtree to another.
        We examine each node as a root in this tree.
:type root: TreeNode
:rtype: int
Questions:
* Does the node have negative value?
            - If not, then just call max_path_to_root(root)
Example:
1
2 3
=> 6
"""
def max_path_to_root(root):
if not root:
return 0
l = max(0, max_path_to_root(root.left))
r = max(0, max_path_to_root(root.right))
# The value won't pass upon.
# root
# / \
# left right
self.max_path_value = max(self.max_path_value,
l + root.val + r)
# The value can be passed to its partent.
# parent
# \
# root
# /
# left
return root.val + max(l, r)
        self.max_path_value = float('-inf')
        max_path_to_root(root)
return self.max_path_value
| [
"[email protected]"
] | |
e0a2821d8090488a95f78871663fc5e00b3fc88c | 08e2c58c06ec587cc20c2bcd084efb9308d86043 | /milp/gen_data.py | b84d5e6d592b0fcc941730fa9ea3dcece023aef9 | [] | no_license | horacepan/qap_scratch | f866ac6c62715b779039488bb0e9f2b13d859e5b | 437ff78226173d9cfb465198c6e7183d86e948c6 | refs/heads/master | 2023-03-04T12:01:39.259432 | 2021-02-09T05:50:53 | 2021-02-09T05:50:53 | 287,471,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | import numpy as np
def generate_max_cut(n, e):
c = np.zeros(n + e)
c[n:] = -1
all_edges = [(i, j) for i in range(n) for j in range(i)]
picked_idx = np.random.choice(len(all_edges), size=e, replace=False)
edges = [all_edges[i] for i in picked_idx]
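    # 0/1 ILP encoding (a sketch of the intent): node variables choose a
    # side of the cut, and one extra variable x_e per edge marks it as
    # cut.  The three rows per edge enforce x_e <= x_u + x_v,
    # x_u + x_v + x_e <= 2 and x_e <= 1, so x_e can be 1 only when the
    # endpoints land on different sides; with c = -1 on edge variables,
    # minimising c.x maximises the number of cut edges.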
A = np.zeros((3*e, n + e))
b = np.zeros(3*e)
row = 0
for idx, (u, v) in enumerate(edges):
e_idx = n + idx
b[row] = 0
A[row, e_idx] = 1
A[row, u] = -1
A[row, v] = -1
b[row + 1] = 2
A[row + 1, u] = 1
A[row + 1, v] = 1
A[row + 1, e_idx] = 1
A[row + 2, e_idx] = 1
b[row + 2] = 1
row += 3
return A, b, c
| [
"[email protected]"
] | |
2ef4fd2907d466bacb152306a4aa8b502e43bfc3 | 9ce40f0dbdfb3e32dc65207c37c8b65ea531162b | /asynchat/send_http_request.py | 18d473d2c7c81598d3b90076c843e4cd0ded15fe | [] | no_license | chenhz5/learn_socket | 83eba5c6e59a2f336311a443d39758313bcf2a5c | fbada734d97751fb0f01ac486ce2369437624ba3 | refs/heads/master | 2021-01-21T23:30:02.404537 | 2014-06-26T07:20:00 | 2014-06-26T07:20:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | import requests
r = requests.get("http://localhost:8000")
print r.text | [
"[email protected]"
] | |
5ebe93bba8504216fbee8a7b1e51360b0fb5b8ed | cb657cd7c2260af788f9e03c7b0f3d9139c222fa | /Bath/Bath/settings.py | 6851b51e0c2223a8217166d31986589b34311507 | [] | no_license | west789/Bath-Of-University | f1b8cdcbb55795a04e3cf4ff869a59aa6576dc29 | c850e57b9e23be906c83883ce478aeb4cb618986 | refs/heads/master | 2020-03-08T04:36:45.474999 | 2018-04-03T15:16:40 | 2018-04-03T15:16:40 | 127,927,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,328 | py | # -*- coding: utf-8 -*-
# Scrapy settings for Bath project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Bath'
SPIDER_MODULES = ['Bath.spiders']
NEWSPIDER_MODULE = 'Bath.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Bath (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
DEFAULT_REQUEST_HEADERS = {
"User-Agent": " Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'Bath.middlewares.BathSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'Bath.middlewares.BathDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'Bath.pipelines.BathPipeline': 300,
#}
ITEM_PIPELINES = {
# 'Australia_2.pipelines.Australia2Pipeline': 300,
"Bath.pipelines.MyprojectPipeline": 1
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
0df5c1a67bb3e0337a8ec0bcb8c2f0998e0366e9 | ee79e734486c0ca550bb8238ef54c78c7727384a | /BisulfiteSeq Bismark/samMethylationExtractor.py | c2b8bae5b238ace1d7cb0b812a37a996efa81c77 | [] | no_license | neilrobertson/BICRCode | 212636e5395f0c0e4dfb3ac3c133f01eb07273ee | 7b3f4da9cdefd7680f07b707339aee59faece1d2 | refs/heads/master | 2020-03-30T16:07:22.637571 | 2018-10-03T10:12:57 | 2018-10-03T10:12:57 | 151,394,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,001 | py | '''
Created on 19 Aug 2011
@author: mcbryan
'''
import getopt
import sys
import csv
from sam.SamFormat import SAMFile
from sequence.genome import MemoryGenome
from genemapping.chrmEnds import ChromosomeEnds
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["sam="])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
debug = False
infile = None
strand = "+"
for o, a in opts:
if o=="--sam":
infile = a
print "SAM:", a
assert infile != None
with open(infile+".methcalls","w") as outfile:
csvout = csv.writer(outfile,delimiter="\t")
genome = MemoryGenome("hg18")
ends = ChromosomeEnds("hg18")
sam = SAMFile(infile)
def findGenerator(haystack,needle):
index = 0
while True:
index = haystack.find(needle,index)
if index == -1:
break
else:
yield index
index += 1
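        # Yields every (possibly overlapping) start index of needle in
        # haystack, e.g. needle "CC" in haystack "CCC" yields 0 and 1.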
for read in sam:
# chromosomes we don't know about (chrL is probably lambda)
if read.chrm not in ends:
continue
# check strand of the read
strand = "-" if read.checkFlag(0x10) else "+"
# get the genomic sequence (extended by 1 base on each side)
# check the genomic sequence for CG dinucleotides
# for each CG, extract the base from the sequence read corresponding to the C (make sure we do this right for both strands)
# if it is a C then it is methylated
# if it is a T then it is unmethylated
# only valid for Lister et al. style sequencing (where complementary sequences to original DNA are not sequenced)
# we want to get the genomic sequence + 1 base on each side
# assume we can't go off the beginning and the end of the sequence at the same time
# ie. our chromosomes are comparatively large compared to our reads
assert read.start-1 > 0 or read.start+len(read.sequence)+1 <= ends[read.chrm]
if read.start-1 < 0:
# we will go off the beginning of the chromosome, pad with one N
genomeseq = "N" + genome.getSequence(read.chrm,read.start,read.start+len(read.sequence)+1)
elif read.start+len(read.sequence)+1 > ends[read.chrm]:
# we will go off the end of the chromosome, pad with one N
genomeseq = genome.getSequence(read.chrm,read.start-1,read.start+len(read.sequence)) + "N"
else:
genomeseq = genome.getSequence(read.chrm,read.start-1,read.start+len(read.sequence)+1)
# make the two sequences comparable in terms of character set (all uppercase) + start positions
read.sequence = "N"+read.sequence.upper()+"N"
genomeseq = genomeseq.upper()
# do a check to see if there are any CG's in there first (slower than not checking of course)
# only searches genomic forward strand but this is fine since CG's are the same on both strands
if "CG" in genomeseq:
if debug:
print
print read.chrm, read.start, len(read.sequence)+read.start
print read.sequence
print genomeseq
print "CG".join(["-"*len(seq) for seq in genomeseq.split("CG")])
# outputs (C,G) locations
locs = [(C,C+1) for C in findGenerator(genomeseq,"CG")]
if strand == "+":
bases = [(read.start+C,read.sequence[C:C+1]) for C,G in locs]
else:
# we want the G from the CG (which is a C on the opposite strand and which is one base along)
# note that the sequence in the SAM file is complemented compared to the genome. i.e. it's the actual
# sequence from the sequencer and will still have C or T as the basis for meth / unmeth calls
bases = [(read.start+G,read.sequence[G:G+1]) for C,G in locs]
for pos, base in bases:
if base in ["C","T"]: # ignore anything that's got an N or a SNP at that position
# we can make a meth call
# C methylated, T unmethylated
methCall = "z" if base == "T" else "Z"
methState = "-" if base == "T" else "+"
csvout.writerow([read.key,methState,read.chrm,pos,methCall]) | [
"[email protected]"
] | |
7cc83d960c1b20ca0acd173372e6a5ff009be6b9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_352/ch9_2020_03_09_13_29_39_224551.py | f8f513cff5ecf9d663269d6059ffc6ee93b59378 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | import math
def calcula_volume_da_esfera(r):
v = (4/3)*(math.pi)*(r**3)
return v | [
"[email protected]"
] | |
b4b98caaa2969e430f5d7568919aed86cdc98771 | e0c378f27462cb00b656473cbe0b172886741818 | /src/flash/core/hooks.py | dd62266117168b06ef9eb7657cc917bd923c24e1 | [
"Apache-2.0"
] | permissive | Lightning-Universe/lightning-flash | d0c955d7fdf962175750d154b3a369a483b8d188 | fc6c97a43d65b49561c896bf05bc1c75536d0dc0 | refs/heads/master | 2023-08-17T12:03:52.563905 | 2023-08-14T12:35:10 | 2023-08-14T12:35:10 | 333,857,397 | 58 | 12 | Apache-2.0 | 2023-09-11T14:43:06 | 2021-01-28T18:47:16 | Python | UTF-8 | Python | false | false | 942 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, Optional, Union
from torch.nn import Module
class FineTuningHooks:
"""Hooks to be used in Task and FlashBaseTuning."""
def modules_to_freeze(self) -> Optional[Union[Module, Iterable[Union[Module, Iterable]]]]:
"""Return the name(s) of the module attributes of the model to be frozen."""
return None
| [
"[email protected]"
] | |
6a3d89ee3512651ca1fc08d58c99c8187d13625d | 4da29a053ecbf0b4f35bbb0d9718436b271df74c | /language_skills/migrations/0020_auto_20200723_1017.py | daa9961d9ddee5010ddae913ecb51bae80c4ae73 | [] | no_license | alirezahi/QA-System | cd232234aa4310d0d8358b18f41e9156214cc503 | 28adc482ed2563374b70cfbea8ab935a2aa4eb2e | refs/heads/master | 2021-07-10T10:17:12.475850 | 2020-11-07T03:13:53 | 2020-11-07T03:13:53 | 211,319,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # Generated by Django 2.1.1 on 2020-07-23 10:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('language_skills', '0019_auto_20200424_2159'),
]
operations = [
migrations.AddField(
model_name='blankquestion',
name='part_text',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='multiplechoicequestion',
name='part_text',
field=models.TextField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
901d85aaa0f2e67d7829c00644e6973faf379c81 | f3b233e5053e28fa95c549017bd75a30456eb50c | /CDK2_input/L17/17-1Q_wat_20Abox/set_1ns_equi.py | 8ebca1c961ba017ec56400ce4e7ed9b2321bdc17 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import os
dir = '/mnt/scratch/songlin3/run/CDK2/L17/wat_20Abox/ti_one-step/17_1Q/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_pbs = filesdir + 'temp_1ns_equi.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../17-1Q_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
014b4a3ae3f9aff05afd54c12352843fa94c84d7 | bfd41fc543f6dbfc821341522cf8e7a9d2e34ce8 | /venvc/bin/webassets | 310b3858768f4cb03f0811a3a3fb16df0ce55f40 | [] | no_license | MaraKovalcik/Flask | 783243560ead637a381f76d3893da2b212eff898 | 1ff8413f3551b051f8e6c76db6cf402fc7428188 | refs/heads/master | 2021-01-22T09:09:16.165734 | 2015-02-24T16:57:14 | 2015-02-24T16:57:14 | 31,268,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | #!/home/student/PycharmProjects/flask-skeleton/venvc/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'webassets==0.9','console_scripts','webassets'
__requires__ = 'webassets==0.9'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('webassets==0.9', 'console_scripts', 'webassets')()
)
| [
"[email protected]"
] | ||
a888103bae83d2ead7218fd412b06478ce9351a5 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2514/60666/252527.py | 05e3a91ada6e526e829a7483701080f0f6eb25d1 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | s=input()
t=input()
# Two-pointer subsequence check: advance through t one character at a
# time and advance in s only on a match; s is a subsequence of t iff
# s is fully consumed.
sIndex = 0
tIndex = 0
while tIndex < len(t) and sIndex < len(s):
    if s[sIndex] == t[tIndex]:
        sIndex += 1
    tIndex += 1
print(sIndex == len(s)) | [
"[email protected]"
] | |
d54310903f61c0db2c2d7ee18765f0b698ebaedb | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/batch/v20200301/_enums.py | 443b074d1c64b03bd1df443ab17ca5b607c42bad | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,776 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AutoUserScope',
'CachingType',
'CertificateFormat',
'CertificateStoreLocation',
'CertificateVisibility',
'ComputeNodeDeallocationOption',
'ComputeNodeFillType',
'ContainerType',
'ContainerWorkingDirectory',
'DiskEncryptionTarget',
'ElevationLevel',
'IPAddressProvisioningType',
'InboundEndpointProtocol',
'InterNodeCommunicationState',
'KeySource',
'LoginMode',
'NetworkSecurityGroupRuleAccess',
'PoolAllocationMode',
'PublicNetworkAccessType',
'StorageAccountType',
]
class AutoUserScope(str, Enum):
"""
The default value is Pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by start tasks.
"""
TASK = "Task"
"""
Specifies that the service should create a new user for the task.
"""
POOL = "Pool"
"""
Specifies that the task runs as the common auto user account which is created on every node in a pool.
"""
class CachingType(str, Enum):
"""
Values are:
none - The caching mode for the disk is not enabled.
readOnly - The caching mode for the disk is read only.
readWrite - The caching mode for the disk is read and write.
The default value for caching is none. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/.
"""
NONE = "None"
"""
The caching mode for the disk is not enabled.
"""
READ_ONLY = "ReadOnly"
"""
The caching mode for the disk is read only.
"""
READ_WRITE = "ReadWrite"
"""
The caching mode for the disk is read and write.
"""
class CertificateFormat(str, Enum):
"""
The format of the certificate - either Pfx or Cer. If omitted, the default is Pfx.
"""
PFX = "Pfx"
"""
The certificate is a PFX (PKCS#12) formatted certificate or certificate chain.
"""
CER = "Cer"
"""
The certificate is a base64-encoded X.509 certificate.
"""
class CertificateStoreLocation(str, Enum):
"""
The default value is currentUser. This property is applicable only for pools configured with Windows nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows image reference). For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory.
"""
CURRENT_USER = "CurrentUser"
"""
Certificates should be installed to the CurrentUser certificate store.
"""
LOCAL_MACHINE = "LocalMachine"
"""
Certificates should be installed to the LocalMachine certificate store.
"""
class CertificateVisibility(str, Enum):
START_TASK = "StartTask"
"""
The certificate should be visible to the user account under which the start task is run. Note that if AutoUser Scope is Pool for both the StartTask and a Task, this certificate will be visible to the Task as well.
"""
TASK = "Task"
"""
The certificate should be visible to the user accounts under which job tasks are run.
"""
REMOTE_USER = "RemoteUser"
"""
The certificate should be visible to the user accounts under which users remotely access the node.
"""
class ComputeNodeDeallocationOption(str, Enum):
"""
If omitted, the default value is Requeue.
"""
REQUEUE = "Requeue"
"""
Terminate running task processes and requeue the tasks. The tasks will run again when a node is available. Remove nodes as soon as tasks have been terminated.
"""
TERMINATE = "Terminate"
"""
Terminate running tasks. The tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Remove nodes as soon as tasks have been terminated.
"""
TASK_COMPLETION = "TaskCompletion"
"""
Allow currently running tasks to complete. Schedule no new tasks while waiting. Remove nodes when all tasks have completed.
"""
RETAINED_DATA = "RetainedData"
"""
Allow currently running tasks to complete, then wait for all task data retention periods to expire. Schedule no new tasks while waiting. Remove nodes when all task retention periods have expired.
"""
class ComputeNodeFillType(str, Enum):
SPREAD = "Spread"
"""
Tasks should be assigned evenly across all nodes in the pool.
"""
PACK = "Pack"
"""
As many tasks as possible (maxTasksPerNode) should be assigned to each node in the pool before any tasks are assigned to the next node in the pool.
"""
class ContainerType(str, Enum):
DOCKER_COMPATIBLE = "DockerCompatible"
"""
A Docker compatible container technology will be used to launch the containers.
"""
class ContainerWorkingDirectory(str, Enum):
TASK_WORKING_DIRECTORY = "TaskWorkingDirectory"
"""
Use the standard Batch service task working directory, which will contain the Task resource files populated by Batch.
"""
CONTAINER_IMAGE_DEFAULT = "ContainerImageDefault"
"""
Using container image defined working directory. Beware that this directory will not contain the resource files downloaded by Batch.
"""
class DiskEncryptionTarget(str, Enum):
"""
If omitted, no disks on the compute nodes in the pool will be encrypted.
"""
OS_DISK = "OsDisk"
"""
The OS Disk on the compute node is encrypted.
"""
TEMPORARY_DISK = "TemporaryDisk"
"""
The temporary disk on the compute node is encrypted. On Linux this encryption applies to other partitions (such as those on mounted data disks) when encryption occurs at boot time.
"""
class ElevationLevel(str, Enum):
"""
nonAdmin - The auto user is a standard user without elevated access. admin - The auto user is a user with elevated access and operates with full Administrator permissions. The default value is nonAdmin.
"""
NON_ADMIN = "NonAdmin"
"""
The user is a standard user without elevated access.
"""
ADMIN = "Admin"
"""
The user is a user with elevated access and operates with full Administrator permissions.
"""
class IPAddressProvisioningType(str, Enum):
"""
The default value is BatchManaged
"""
BATCH_MANAGED = "BatchManaged"
"""
A public IP will be created and managed by Batch. There may be multiple public IPs depending on the size of the Pool.
"""
USER_MANAGED = "UserManaged"
"""
Public IPs are provided by the user and will be used to provision the Compute Nodes.
"""
NO_PUBLIC_IP_ADDRESSES = "NoPublicIPAddresses"
"""
No public IP Address will be created for the Compute Nodes in the Pool.
"""
class InboundEndpointProtocol(str, Enum):
TCP = "TCP"
"""
Use TCP for the endpoint.
"""
UDP = "UDP"
"""
Use UDP for the endpoint.
"""
class InterNodeCommunicationState(str, Enum):
"""
    This imposes restrictions on which nodes can be assigned to the pool. Enabling this value can reduce the chance that the requested number of nodes will be allocated in the pool. If not specified, this value defaults to 'Disabled'.
"""
ENABLED = "Enabled"
"""
Enable network communication between virtual machines.
"""
DISABLED = "Disabled"
"""
Disable network communication between virtual machines.
"""
class KeySource(str, Enum):
"""
Type of the key source.
"""
MICROSOFT_BATCH = "Microsoft.Batch"
"""
Batch creates and manages the encryption keys used to protect the account data.
"""
MICROSOFT_KEY_VAULT = "Microsoft.KeyVault"
"""
The encryption keys used to protect the account data are stored in an external key vault.
"""
class LoginMode(str, Enum):
"""
Specifies login mode for the user. The default value for VirtualMachineConfiguration pools is interactive mode and for CloudServiceConfiguration pools is batch mode.
"""
BATCH = "Batch"
"""
The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running parallel processes.
"""
INTERACTIVE = "Interactive"
"""
The LOGON32_LOGON_INTERACTIVE Win32 login mode. Some applications require having permissions associated with the interactive login mode. If this is the case for an application used in your task, then this option is recommended.
"""
class NetworkSecurityGroupRuleAccess(str, Enum):
ALLOW = "Allow"
"""
Allow access.
"""
DENY = "Deny"
"""
Deny access.
"""
class PoolAllocationMode(str, Enum):
"""
The pool allocation mode also affects how clients may authenticate to the Batch Service API. If the mode is BatchService, clients may authenticate using access keys or Azure Active Directory. If the mode is UserSubscription, clients must use Azure Active Directory. The default is BatchService.
"""
BATCH_SERVICE = "BatchService"
"""
Pools will be allocated in subscriptions owned by the Batch service.
"""
USER_SUBSCRIPTION = "UserSubscription"
"""
Pools will be allocated in a subscription owned by the user.
"""
class PublicNetworkAccessType(str, Enum):
"""
If not specified, the default value is 'enabled'.
"""
ENABLED = "Enabled"
"""
Enables connectivity to Azure Batch through public DNS.
"""
DISABLED = "Disabled"
"""
Disables public connectivity and enables private connectivity to Azure Batch Service through private endpoint resource.
"""
class StorageAccountType(str, Enum):
"""
If omitted, the default is "Standard_LRS". Values are:
Standard_LRS - The data disk should use standard locally redundant storage.
Premium_LRS - The data disk should use premium locally redundant storage.
"""
STANDARD_LRS = "Standard_LRS"
"""
The data disk should use standard locally redundant storage.
"""
PREMIUM_LRS = "Premium_LRS"
"""
The data disk should use premium locally redundant storage.
"""
| [
"[email protected]"
] | |
0cfc589f684db4f5aed19ee3dc3ff803135e1f8a | f992a5b264b8ba117b5f90f3de942a6ce7a531d1 | /Code-Generation/v5/obj/Block.py | 2bb6eb3b7875e83d0022abd34026a7faa588c557 | [] | no_license | pean1128/UI2CODE-1 | 8f137c092199fb59171c64ccaa853d4df768f6c1 | 39bfe4780f823bc9bdafb933e441d97441e1abed | refs/heads/master | 2023-04-27T03:20:28.171023 | 2021-01-06T09:26:50 | 2021-01-06T09:26:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,123 | py | import pandas as pd
import cv2
from obj.Compo_HTML import CompoHTML
from obj.HTML import HTML
from obj.CSS import CSS
block_id = 0
def slice_blocks(compos_html, direction='v', is_slice_sub_block=True):
    '''
    Vertically or horizontally scan compos.
    :param compos_html: CompoHTML objects, including elements and lists
    :param direction: 'v' scans top-to-bottom, 'h' scans left-to-right
    :param is_slice_sub_block: if True, each new Block slices itself into sub-blocks
    :return blocks: list of [block], block: list of [CompoHTML objects]
    '''
blocks = []
block_compos = []
global block_id
dividers = []
divider = -1
prev_divider = 0
if direction == 'v':
compos_html.sort(key=lambda x: x.top)
for compo in compos_html:
# new block
if divider < compo.top:
prev_divider = divider
dividers.append(compo.top)
divider = compo.bottom
dividers.append(divider)
if len(block_compos) > 0:
block_id += 1
css_name = '#block-' + str(block_id)
css = CSS(css_name, margin_top=str(int(compo.top - prev_divider)) + 'px', clear='left', border="solid 2px black")
blocks.append(Block(id=block_id, compos=block_compos, is_slice_sub_block=is_slice_sub_block,
html_id='block-'+str(block_id), css={css_name: css}))
block_compos = []
# extend block
elif compo.top < divider < compo.bottom:
divider = compo.bottom
dividers[-1] = divider
block_compos.append(compo)
# collect left compos
if len(block_compos) > 0:
block_id += 1
css_name = '#block-' + str(block_id)
css = CSS(css_name, margin_top=str(int(block_compos[0].top - prev_divider)) + 'px', clear='left', border="solid 2px black")
blocks.append(Block(id=block_id, compos=block_compos, is_slice_sub_block=is_slice_sub_block,
html_id='block-' + str(block_id), css={css_name: css}))
elif direction == 'h':
compos_html.sort(key=lambda x: x.left)
for compo in compos_html:
# new block
if divider < compo.left:
prev_divider = divider
dividers.append(compo.left)
divider = compo.right
dividers.append(divider)
if len(block_compos) > 0:
block_id += 1
css_name = '#block-' + str(block_id)
css = CSS(css_name, margin_left=str(int(compo.left - prev_divider)) + 'px', float='left', border="solid 2px black")
blocks.append(Block(id=block_id, compos=block_compos, is_slice_sub_block=is_slice_sub_block,
html_id='block-' + str(block_id), css={css_name: css}))
block_compos = []
# extend block
elif compo.left < divider < compo.right:
divider = compo.right
dividers[-1] = divider
block_compos.append(compo)
# collect left compos
if len(block_compos) > 0:
block_id += 1
css_name = '#block-' + str(block_id)
css = CSS(css_name, margin_left=str(int(block_compos[0].left - prev_divider)) + 'px', float='left', border="solid 2px black")
blocks.append(Block(id=block_id, compos=block_compos, is_slice_sub_block=is_slice_sub_block,
html_id='block-' + str(block_id), css={css_name: css}))
return blocks
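# A small sketch of the intended call pattern (assumes ``compos`` is a
# prepared list of CompoHTML objects with top/bottom/left/right populated):
#
#   blocks = slice_blocks(compos, direction='v')
#   for block in blocks:
#       print(block.html_id, block.css_script)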
def visualize_blocks(blocks, img, img_shape):
board = cv2.resize(img, img_shape)
for block in blocks:
board = block.visualize(board, img_shape, show=False)
cv2.imshow('compos', board)
cv2.waitKey()
cv2.destroyWindow('compos')
class Block:
def __init__(self, id, compos,
is_slice_sub_block=True, html_tag=None, html_id=None, html_class_name=None, css=None):
self.block_id = id
self.compos = compos # list of CompoHTML objs
self.block_obj = None # CompoHTML obj
self.block_img = None
self.sub_blocks = [] # list of Block objs
self.top = None
self.left = None
self.bottom = None
self.right = None
self.width = None
self.height = None
# html info
self.html = None # HTML obj
self.html_tag = 'div' if html_tag is None else html_tag
self.html_id = html_id
self.html_class_name = html_class_name
self.html_script = '' # sting
self.css = css # CSS objs
self.css_script = '' # string
# only slice sub-block once
if is_slice_sub_block:
self.slice_sub_blocks()
if css is not None:
self.init_css()
self.init_boundary()
self.init_html()
def init_boundary(self):
self.top = min(self.compos, key=lambda x: x.top).top
self.bottom = max(self.compos, key=lambda x: x.bottom).bottom
self.left = min(self.compos, key=lambda x: x.left).left
self.right = max(self.compos, key=lambda x: x.right).right
def init_html(self):
self.html = HTML(tag=self.html_tag, id=self.html_id, class_name=self.html_class_name)
if len(self.sub_blocks) > 1:
# add compos of sub blocks
for sub_block in self.sub_blocks:
self.html.add_child(sub_block.html_script)
else:
for compo in self.compos:
self.html.add_child(compo.html_script)
self.html_script = self.html.html_script
def init_css(self):
if len(self.sub_blocks) > 1:
for sub_block in self.sub_blocks:
self.css.update(sub_block.css)
else:
for compo in self.compos:
self.css.update(compo.css)
        self.assembly_css()
def assembly_css(self):
self.css_script = ''
for i in self.css:
self.css_script += self.css[i].css_script
# self.block_obj.css = self.css
def slice_sub_blocks(self):
'''
Horizontally slice the block into sub-blocks
'''
self.sub_blocks = slice_blocks(self.compos, 'h', is_slice_sub_block=False)
def clip_block_img(self, org, show=False):
self.block_img = org[self.top: self.bottom, self.left: self.right]
if show:
self.show_block_img()
def show_block_img(self):
cv2.imshow('block', self.block_img)
cv2.waitKey()
cv2.destroyWindow('block')
def visualize(self, img=None, img_shape=None, flag='line', show=True):
fill_type = {'line': 2, 'block': -1}
        board = cv2.resize(img, img_shape)
board = cv2.rectangle(board, (self.left, self.top), (self.right, self.bottom), (0, 255, 0), fill_type[flag])
if show:
cv2.imshow('compo', board)
cv2.waitKey()
cv2.destroyWindow('compo')
return board
| [
"[email protected]"
] | |
f3fbe02ead08747a6e90915969d89590f9fef352 | 4520f56d4952c788e198ee7eee39911c9a76c60f | /01_Jump_to_python/0_example/4_0704_t/3_rhombus_v3.py | 80617b22c286553148a0003f08aed8440ff5eb4d | [] | no_license | SuHyeonJung/iot_python2019 | bef8877a1cd41981ad2125291f5af44f4fd1701c | 7860630ae28c53677a3c2761c9e997b28ea55f26 | refs/heads/master | 2020-06-14T22:18:27.503781 | 2019-11-08T05:50:41 | 2019-11-08T05:50:41 | 195,142,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | odd = 0
while True:
odd = int(input("홀수를 입력하세요(0 <- 종료): "))
end = int(odd / 2)
value = int(odd / 2)
second_end = end + 1
if odd == 0:
break
elif odd % 2== 0:
print("잘못 입력하셨습니다.다시 입력하세요.")
else:
count = 1
i = 1
print(end=' ')
print(odd*'-')
print()
while second_end > 0:
print('|', end = '')
while end != 0:
print(end=' ')
end = end - 1
end = int(odd /2) - i
print(count*'*', end = value*' ')
count = count + 2
value = value - 1
second_end = second_end - 1
i = i + 1
print('|')
end = int(odd / 2)
mirror = int(odd / 2)
star = odd - 2
k = mirror - 1
j = 1
second_value = 1
while end >= 1:
print('|', end = '')
while k < mirror:
print(end=' ')
k = k + 1
print(star*'*', end = second_value*' ')
star = star - 2
end = end - 1
j = j + 1
k = k - j
second_value = second_value + 1
print('|')
print()
print(end=' ')
print(odd*'-')
print("프로그램을 종료합니다.") | [
"[email protected]"
] | |
bc97a879f29cc25c84cbe0858be8759cafd2f892 | 1ac7f1035545fc9b1cbb1fee84bf5bdd1b70a991 | /neural/othermodels.py | a03cc3b06439630551e05442d7794741b0a44f6c | [] | no_license | HussainAther/neuroscience | 1e1225d811c64c260825540c210a0f7925386085 | 624bf82ce5c610c2ca83a0c4c49d3f4d0b92a1e2 | refs/heads/master | 2023-05-25T15:25:36.617994 | 2023-05-13T23:25:18 | 2023-05-13T23:25:18 | 157,743,251 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,299 | py | import numpy as np
import pylab as plt
from scipy.integrate import odeint
"""
McCulloch-Pitts replaces the involved Hodgkin-Huxley system by a threshold device
with only two states (0 and 1) in which 0 denotes the inactivated, silent condition
and 1 denotes the activiated, firing state. We use the equation:
X(t+1) = Theta(I_i - theta_i)
in which t is the discretized time, theta_i is the activation threshold for unit i,
and I_i = X_i are the data that has been identified. In this case,
Postsynpatic current I_iPSC = - summation of j=1 to n of (w_ij * I_j - I-i)
in which w_ij is the synaptic weight of the connection from unit j to unit i,
dependent on the synpatic gain g_ij that evolves during learning and reflecting the synaptic plasticity
and the intracelular resistances. The capacitance is deliberately neglected and I_iext is the
externally controlled input to neuron i.
"""
def simplemodel(w, I):
"""
Simple conductance model for some neuron i.
w is a list of weights of connection from unit j to i for each neuron.
I is a list of currents from each neuron j.
"""
    I_iPSC = 0  # post-synaptic current
    I_iext = 5  # externally controlled input to neuron i
    for j in range(len(w)):  # weighted sum over presynaptic currents
        I_iPSC += w[j] * I[j]
    I_iPSC -= I_iext  # subtract the external current
    I_iPSC *= -1  # leading minus sign from the convention in the module docstring
return I_iPSC
def mp(simplemodel, theta_i):
    """
    McCulloch-Pitts model. simplemodel is a sequence of net input currents
    (e.g. outputs of simplemodel() for each unit); theta_i is the activation
    threshold for unit i.
    """
    X = []  # binary state of each unit
    for j in simplemodel:
        if j - theta_i >= 0:  # Heaviside step: fire iff input clears threshold
            X.append(1)
        else:
            X.append(0)
    return X
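# Worked example with illustrative weights and currents: with I_iext = 5
# inside simplemodel, the net input below is -(2.0 - 5.0) = 3.0, so the
# unit fires for threshold theta_i = 0.
example_net_input = simplemodel([0.5, 0.5, 1.0], [1.0, 2.0, 0.5])
print(mp([example_net_input], theta_i=0))  # -> [1]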
def fn(s):
"""
FitzHugh and Nagumo approximated the Hodgkin-Huxley equations using a general linear relation (h = a-bn)
used in combination with coordinate transformation and rescaling to arrive at the Bonhoeffer-Van-der-Pol
    or FitzHugh-Nagumo equations. Takes in s, the state array for the two coupled cells. It must
    have four entries: the voltage of the first cell, the recovery variable of the first cell,
    the voltage of the second cell, and the recovery variable of the second cell.
    Solves for the two coupled cells using ordinary differential equations.
    """
    (v, w, v2, w2) = (s[0], s[1], s[2], s[3])
# constants used for fitting to general linear relation
x = 0.08
y = 0.7
z = 0.8
theta = 0 # voltage phase shift
Vs = 2 # applied voltage
Iapp = 1.2 # applied current
gsyn = 30 # synaptic conductance in pS
# Synaptic currents
Isyn = gsyn*((v-Vs))/(1+np.power(np.e,(v2-theta)))
Isyn2 = gsyn*((v2-Vs))/(1+np.power(np.e,(v-theta)))
# synapse 1
vd = v - np.power(v, 3)/3 - w + Iapp + Isyn
wd = x*(v + y - z*w)
# synapse 2
v2d = v2 - np.power(v2, 3)/3 - w2 + Iapp + Isyn2
w2d = x*(v2 + y - z*w2)
# return state derivatives that odeint uses
return [vd, wd, v2d, w2d]
s = [-1.2, 1.2, -1.2, 1.2]  # initial conditions [v, w, v2, w2]
t = np.arange(0.0, 2800.0, 0.01)
traj = odeint(fn, s, t, rtol=1.49012e-13, atol=1.49012e-13)
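# A minimal plotting sketch for the trajectory computed above: membrane
# voltages of the two coupled FitzHugh-Nagumo cells over time (columns
# follow the state order [v, w, v2, w2]).
plt.figure()
plt.plot(t, traj[:, 0], label='v (cell 1)')
plt.plot(t, traj[:, 2], label='v2 (cell 2)')
plt.xlabel('time')
plt.legend()
plt.show()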
"""
The Morris-Lecar model describes the spiking dynamics of potassium- and calcium-controlled muscle fibers.
"""
# Constants
C_m = 1.0 # membrane capacitance, in uF/cm^2
g_Ca = 1.1 # maximum conducances, in mS/cm^2
g_K = 2.0
g_L = 0.5
E_Ca = 100.0 # Nernst reversal potentials, in mV
E_K = -70.0
E_L = -50.0
def m_infty(V):
    """
    Steady-state open probability of the fast (calcium) channel as a
    function of membrane voltage V; the channels are partitioned according
    to a Boltzmann distribution.
    """
    return (1.0 + np.tanh((V + 1.0) / 15.0)) / 2.0
def w_infty(V):
    """
    Same, but for the steady state of the slow (potassium) gating variable.
    """
    return (1.0 + np.tanh(V / 30.0)) / 2.0
def tau_w(V):
    """
    Voltage-dependent time constant of the gating variable, in ms.
    """
    return 5.0 / np.cosh(V / 60.0)  # in ms
def I_ext(t):
    """
    External input current: a staircase that steps up every 100 time units.
    """
    return 10 * np.floor(t / 100)
t = np.arange(0.0, 400.0, 0.1)
I = I_ext(t)
# Ionic currents in standard Morris-Lecar form, built from the constants above.
def I_Ca(V):
    return g_Ca * m_infty(V) * (V - E_Ca)
def I_K(V, w):
    return g_K * w * (V - E_K)
def I_L(V):
    return g_L * (V - E_L)
def ml(X, t):
    """
    Morris-Lecar right-hand side for odeint. X = (V, w) is the state vector.
    """
    V, w = X
    dVdt = (I_ext(t) - I_Ca(V) - I_K(V, w) - I_L(V)) / C_m
    dwdt = (w_infty(V) - w) / tau_w(V)
    return dVdt, dwdt
X = odeint(ml, [-44.0, 0.05], t)
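# Sketch: visualize the Morris-Lecar voltage response to the staircase input.
plt.figure()
plt.plot(t, X[:, 0], label='V (mV)')
plt.plot(t, I, label='I_ext')
plt.xlabel('time (ms)')
plt.legend()
plt.show()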
| [
"[email protected]"
] | |
5a115ced7ec699776c2e747f924d8bf8722a9b7d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02661/s028332585.py | 9e89e79c8a1f08abee3a9c1c567320963f0d6e4a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | N = int(input())
A = []
B = []
for _ in range(N):
a, b = map(int, input().split())
A.append(a)
B.append(b)
def median(arr):
    # Middle element for odd length; mean of the two middle elements otherwise.
    arr.sort()
n = len(arr)
if n % 2 == 1:
return arr[(n + 1) // 2 - 1]
else:
return (arr[n//2 - 1] + arr[n//2]) / 2
med_a = median(A)
med_b = median(B)
if N % 2 == 1:
    # Odd N: the median takes every integer value between med_a and med_b.
    ans = int(med_b) - int(med_a) + 1
else:
    # Even N: the median moves in steps of 0.5, so count the doubled values.
    ans = med_b * 2 - med_a * 2 + 1
ans = int(ans)
print(ans) | [
"[email protected]"
] | |
21af6fdb705f8c486819d5022b336934d697fd99 | 7f8084cc37db622fb50cfa656ed1d0d4da007fe3 | /model.py | 1eb0af7c003f16f41327fe1b5e67595823862521 | [] | no_license | jessicagamio/testing-py | a3454878f1dbe1e2f914d2a126748b70dcbdc701 | 9d1745f1078dd3754dd62a622a94d61d78342e24 | refs/heads/master | 2021-06-22T14:25:58.300155 | 2019-08-09T23:34:54 | 2019-08-09T23:34:54 | 201,549,658 | 0 | 0 | null | 2021-03-20T01:28:20 | 2019-08-09T22:41:51 | Python | UTF-8 | Python | false | false | 1,024 | py | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Game(db.Model):
"""Board game."""
__tablename__ = "games"
game_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(20), nullable=False, unique=True)
description = db.Column(db.String(100))
def connect_to_db(app, db_uri="postgresql:///games"):
app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
db.app = app
db.init_app(app)
def example_data():
"""Create example data for the test database."""
    # Clear out any existing rows, then add three sample games.
Game.query.delete()
game1 = Game(name="Game One", description="This is Game One")
game2 = Game(name="Game Two", description="This is Game Two")
game3 = Game(name="Game Three", description="This is Game Three")
db.session.add_all([game1, game2, game3])
db.session.commit()
if __name__ == '__main__':
from party import app
connect_to_db(app)
print("Connected to DB.")
| [
"[email protected]"
] | |
541da3b572ffefac450ea689dfd561a0cf40fba9 | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/cv/HiFaceGAN/src/util.py | d803a60fc82b93ac6689d07c104cb16cc4b1c51c | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 2,673 | py | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utils for model"""
import random
import cv2
import mindspore as ms
import mindspore.nn as nn
import numpy as np
def set_global_seed(i):
"""Set global seed"""
ms.set_seed(i)
np.random.seed(i)
random.seed(i)
def image2numpy(image):
"""Transform image to numpy array"""
image = image.asnumpy()
image = np.rint(np.clip(np.transpose(image, (1, 2, 0)) * 255, a_min=0, a_max=255)).astype(np.uint8)
return image
def make_joined_image(im1, im2, im3):
"""Create joined image"""
im1 = image2numpy(im1)
im2 = image2numpy(im2)
im3 = image2numpy(im3)
height, _, _ = im1.shape
joined_image = np.zeros((height, height * 3, 3), dtype=np.uint8)
joined_image[:, :height] = im1
joined_image[:, height: 2 * height] = im2
joined_image[:, 2 * height:] = im3
return joined_image
def save_image(image, image_path):
"""Save image"""
cv2.imwrite(image_path, image)
def clip_adam_param(beta):
"""Clip Adam betas"""
return min(max(1e-6, beta), 1 - 1e-6)
def get_lr(initial_lr, lr_policy, num_epochs, num_epochs_decay, dataset_size):
"""
Learning rate generator.
For 'linear', we keep the same learning rate for the first <num_epochs>
    epochs and linearly decay the rate toward zero over the next
    <num_epochs_decay> epochs.
"""
if lr_policy == 'linear':
lrs = [initial_lr] * dataset_size * num_epochs
for epoch in range(num_epochs_decay):
lr_epoch = initial_lr * (num_epochs_decay - epoch) / num_epochs_decay
lrs += [lr_epoch] * dataset_size
return ms.Tensor(np.array(lrs).astype(np.float32))
if lr_policy == 'constant':
return initial_lr
raise ValueError(f'Unknown lr_policy {lr_policy}')
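# A short usage sketch for ``get_lr`` with the 'linear' policy (values are
# illustrative): constant for ``num_epochs`` epochs, then a linear decay
# toward zero over ``num_epochs_decay`` epochs.
#
#   lrs = get_lr(initial_lr=2e-4, lr_policy='linear', num_epochs=10,
#                num_epochs_decay=5, dataset_size=100)
#   # -> Tensor with dataset_size * (num_epochs + num_epochs_decay) entries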
def enable_batch_statistics(net):
"""Enable batch statistics in all BatchNorms"""
if isinstance(net, nn.BatchNorm2d):
net.use_batch_statistics = True
else:
for cell in net.cells():
enable_batch_statistics(cell)
| [
"[email protected]"
] | |
b9c3df3f4295b0a7f0bc2813d087a40f6b0aafff | 0f274618632a0cc96ac0b49e1f784759cfaf3a41 | /pytorch_models/unet/blocks.py | d45af78870e1646648fb4215ecce94f7de54dea5 | [] | no_license | brookisme/pytorch_models | 2cd0adf5104606ffd38c90af11fe330ce15fc7c5 | 2bd1c8527d7da7dda414250c28fab2fb02315821 | refs/heads/master | 2020-06-01T23:02:00.046636 | 2020-01-30T05:37:53 | 2020-01-30T05:37:53 | 190,959,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,059 | py | import torch.nn as nn
import pytorch_models.blocks as blocks
from pytorch_models.deeplab.blocks import ASPP
class RSPP(nn.Module):
""" Residual Spatial Pyramid Pooling
Removes the "Atrous" from a modified "Atrous Spatial Pyramid Pooling"
blocks and added a residual skip connection. By default also
turns off the pooling in the ASSP.
Defaults Example: x-> F_5(x)+ F_3(x) + x
Args:
in_ch<int>: number of input channels
out_ch<int|None>: if out_ch is None out_ch=in_ch
kernel_sizes<list[int]>: kernel_size for each conv in stack
pooling<bool>: include image_pooling block
residual<bool>:
- if False just return the block without residual
- for use in architectures where the skip connection is optional
shortcut_method<str>: see blocks.Residual docs
spp_config: config for underlying aspp block
"""
def __init__(self,
in_ch,
out_ch=None,
kernel_sizes=[5,3],
pooling=False,
residual=True,
shortcut_method=blocks.Residual.AUTO_SHORTCUT,
spp_config={}):
super(RSPP, self).__init__()
if out_ch is None:
out_ch=in_ch
self.in_ch=in_ch
self.out_ch=out_ch
spp=self._spp(kernel_sizes,pooling,spp_config)
self.rspp=blocks.Residual(
in_ch=self.in_ch,
out_ch=self.out_ch,
block=spp,
is_residual_block=residual,
shortcut_stride=1,
shortcut_method=shortcut_method)
def forward(self,x):
return self.rspp(x)
def _spp(self,kernel_sizes,pooling,config):
config['kernel_sizes']=kernel_sizes
config['pooling']=pooling
config['dilations']=config.get('dilations',[1]*len(kernel_sizes))
config['join_method']=ASPP.ADD
if config.get('out_conv_config') is None:
config['out_conv_config']=False
return ASPP(self.in_ch,self.out_ch,**config)
| [
"[email protected]"
] | |
25e771061a927d9aeb0fc3aae302dc795ffb378e | 5068bc927a7fff73923ce95862ff70120160c491 | /electrum_axe/gui/qt/request_list.py | 3be7ce6113404ba464b82efed458c087817fdc27 | [
"MIT"
] | permissive | AXErunners/electrum-axe | cdbce2dbb92e23e32e9f9b733ae9f65f51c0ae9f | 7ef05088c0edaf0688fb167df353d6da619ebf2f | refs/heads/master | 2021-04-03T09:40:37.109317 | 2020-08-27T16:53:18 | 2020-08-27T16:53:18 | 124,705,752 | 336 | 75 | MIT | 2020-10-17T18:30:25 | 2018-03-10T23:00:48 | Python | UTF-8 | Python | false | false | 7,068 | py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import IntEnum
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtWidgets import QMenu
from PyQt5.QtCore import Qt
from electrum_axe.i18n import _
from electrum_axe.util import format_time, age
from electrum_axe.plugin import run_hook
from electrum_axe.paymentrequest import PR_UNKNOWN
from electrum_axe.wallet import InternalAddressCorruption
from .util import MyTreeView, pr_tooltips, pr_icons, read_QIcon
class RequestList(MyTreeView):
class Columns(IntEnum):
DATE = 0
ADDRESS = 1
SIGNATURE = 2
DESCRIPTION = 3
AMOUNT = 4
STATUS = 5
headers = {
Columns.DATE: _('Date'),
Columns.ADDRESS: _('Address'),
Columns.SIGNATURE: '',
Columns.DESCRIPTION: _('Description'),
Columns.AMOUNT: _('Amount'),
Columns.STATUS: _('Status'),
}
filter_columns = [Columns.DATE, Columns.ADDRESS, Columns.SIGNATURE, Columns.DESCRIPTION, Columns.AMOUNT]
def __init__(self, parent):
super().__init__(parent, self.create_menu,
stretch_column=self.Columns.DESCRIPTION,
editable_columns=[])
self.setModel(QStandardItemModel(self))
self.setSortingEnabled(True)
self.setColumnWidth(self.Columns.DATE, 180)
self.update()
self.selectionModel().currentRowChanged.connect(self.item_changed)
def item_changed(self, idx):
# TODO use siblingAtColumn when min Qt version is >=5.11
addr = self.model().itemFromIndex(idx.sibling(idx.row(), self.Columns.ADDRESS)).text()
req = self.wallet.receive_requests.get(addr)
if req is None:
self.update()
return
expires = age(req['time'] + req['exp']) if req.get('exp') else _('Never')
amount = req['amount']
message = req['memo']
self.parent.receive_address_e.setText(addr)
self.parent.receive_message_e.setText(message)
self.parent.receive_amount_e.setAmount(amount)
self.parent.expires_combo.hide()
self.parent.expires_label.show()
self.parent.expires_label.setText(expires)
self.parent.new_request_button.setEnabled(True)
def update(self):
self.wallet = self.parent.wallet
# hide receive tab if no receive requests available
if self.parent.isVisible():
b = len(self.wallet.receive_requests) > 0
self.setVisible(b)
self.parent.receive_requests_label.setVisible(b)
if not b:
self.parent.expires_label.hide()
self.parent.expires_combo.show()
# update the receive address if necessary
current_address = self.parent.receive_address_e.text()
domain = self.wallet.get_receiving_addresses()
try:
addr = self.wallet.get_unused_address()
except InternalAddressCorruption as e:
self.parent.show_error(str(e))
addr = ''
if current_address not in domain and addr:
self.parent.set_receive_address(addr)
self.parent.new_request_button.setEnabled(addr != current_address)
self.parent.update_receive_address_styling()
self.model().clear()
self.update_headers(self.__class__.headers)
self.hideColumn(self.Columns.ADDRESS)
for req in self.wallet.get_sorted_requests(self.config):
address = req['address']
if address not in domain:
continue
timestamp = req.get('time', 0)
amount = req.get('amount')
expiration = req.get('exp', None)
message = req['memo']
date = format_time(timestamp)
status = req.get('status')
signature = req.get('sig')
requestor = req.get('name', '')
amount_str = self.parent.format_amount(amount) if amount else ""
labels = [date, address, '', message, amount_str, pr_tooltips.get(status,'')]
items = [QStandardItem(e) for e in labels]
self.set_editability(items)
if signature is not None:
items[self.Columns.SIGNATURE].setIcon(read_QIcon("seal.png"))
items[self.Columns.SIGNATURE].setToolTip(f'signed by {requestor}')
            if status != PR_UNKNOWN:
items[self.Columns.STATUS].setIcon(read_QIcon(pr_icons.get(status)))
items[self.Columns.DESCRIPTION].setData(address, Qt.UserRole)
self.model().insertRow(self.model().rowCount(), items)
self.filter()
def create_menu(self, position):
idx = self.indexAt(position)
item = self.model().itemFromIndex(idx)
# TODO use siblingAtColumn when min Qt version is >=5.11
item_addr = self.model().itemFromIndex(idx.sibling(idx.row(), self.Columns.ADDRESS))
if not item_addr:
return
addr = item_addr.text()
req = self.wallet.receive_requests.get(addr)
if req is None:
self.update()
return
column = idx.column()
column_title = self.model().horizontalHeaderItem(column).text()
column_data = item.text()
menu = QMenu(self)
if column != self.Columns.SIGNATURE:
if column == self.Columns.AMOUNT:
column_data = column_data.strip()
menu.addAction(_("Copy {}").format(column_title), lambda: self.parent.app.clipboard().setText(column_data))
menu.addAction(_("Copy URI"), lambda: self.parent.view_and_paste('URI', '', self.parent.get_request_URI(addr)))
menu.addAction(_("Save as BIP70 file"), lambda: self.parent.export_payment_request(addr))
menu.addAction(_("Delete"), lambda: self.parent.delete_payment_request(addr))
run_hook('receive_list_menu', menu, addr)
menu.exec_(self.viewport().mapToGlobal(position))
| [
"[email protected]"
] | |
6559879e77fdb556569fca0a190b2284efe64138 | e49cfd0ec48f2fc524b5f002bbea6a566f7b1dd4 | /synapse/lib/types.py | c9d524322eeebde5b6fb97a3b700dea750fa6819 | [
"Apache-2.0"
] | permissive | k1derly-fe/synapse | 0a069a4407e6aaeac26f4347ccdedc4a3fd2a556 | 603f7a7dfc9a10b603f6c8d97a9f33855c59a4c9 | refs/heads/master | 2020-04-05T15:05:49.513535 | 2016-06-30T16:52:05 | 2016-06-30T16:52:05 | 62,193,565 | 0 | 0 | null | 2016-06-29T03:45:55 | 2016-06-29T03:45:55 | null | UTF-8 | Python | false | false | 12,311 | py | from __future__ import absolute_import,unicode_literals
import re
import time
import socket
import struct
import datetime
import synapse.lib.urlhelp as s_urlhelp
from synapse.common import *
class DataType:
subprops = ()
def __init__(self, tlib, name, **info):
self.tlib = tlib
self.name = name
self.info = info
def _raiseBadValu(self, valu, **info):
raise BadTypeValu(name=self.name, valu=valu, **info)
def subs(self):
'''
Implement if the presence of a property with this type requires sub props.
'''
return self.subprops
def chop(self, valu):
'''
Returns a tuple of (norm,subs) for the given valu.
'''
return self.norm(valu),{}
def extend(self, name, **info):
'''
Construct a new subtype from this instance.
'''
for k,v in self.info.items():
info.setdefault(k,v)
return self.__class__(self.tlib, name,**info)
class StrType(DataType):
def __init__(self, tlib, name, **info):
DataType.__init__(self, tlib, name, **info)
self.regex = None
regex = info.get('regex')
if regex != None:
self.regex = re.compile(regex)
def repr(self, valu):
return valu
def norm(self, valu):
if self.info.get('lower'):
valu = valu.lower()
enums = self.info.get('enums')
if enums != None and valu not in enums:
self._raiseBadValu(valu,enums=enums)
if self.regex != None and not self.regex.match(valu):
self._raiseBadValu(valu,regex=self.info.get('regex'))
return valu
def parse(self, valu):
return self.norm(valu)
class IntType(DataType):
def __init__(self, tlib, name, **info):
DataType.__init__(self, tlib, name, **info)
self.fmt = info.get('fmt','%d')
#self.modval = info.get('mod',None)
self.minval = info.get('min',None)
self.maxval = info.get('max',None)
def repr(self, valu):
return self.fmt % valu
def norm(self, valu):
if self.minval != None and valu < self.minval:
self._raiseBadValu(valu,minval=self.minval)
if self.maxval != None and valu > self.maxval:
self._raiseBadValu(valu,maxval=self.maxval)
return valu
def parse(self, valu):
try:
valu = int(valu,0)
except Exception as e:
            self._raiseBadValu(valu)
return self.norm(valu)
class BoolType(DataType):
def norm(self, valu):
return int(bool(valu))
def repr(self, valu):
return repr(bool(valu))
def parse(self, text):
text = text.lower()
if text in ('true','t','y','yes','1'):
return 1
if text in ('false','f','n','no','0'):
return 0
self._raiseBadValu(text)
def ipv4str(valu):
byts = struct.pack('>I',valu)
return socket.inet_ntoa(byts)
def ipv4int(valu):
byts = socket.inet_aton(valu)
return struct.unpack('>I', byts)[0]
class IPv4Type(DataType):
def norm(self, valu):
return valu & 0xffffffff
def repr(self, valu):
return ipv4str(valu)
def parse(self, text):
return ipv4int(text)
def ipv6norm(text):
return socket.inet_ntop( socket.AF_INET6, socket.inet_pton( socket.AF_INET6, text ) )
class IPv6Type(DataType):
def norm(self, valu):
try:
return ipv6norm(valu)
except Exception as e:
self._raiseBadValu(valu)
def parse(self, text):
return self.norm(text)
def repr(self, valu):
return valu
#class HostPort(DataType):
class Srv4Type(DataType):
'''
Base type for <ipv4>:<port> format.
'''
subprops = (
tufo('port', ptype='inet:port'),
tufo('ipv4', ptype='inet:ipv4'),
)
def norm(self, valu):
return valu & 0xffffffffffff
def repr(self, valu):
addr = valu >> 16
port = valu & 0xffff
return '%s:%d' % ( ipv4str(addr), port )
def chop(self, valu):
addr = valu >> 16
port = valu & 0xffff
return valu,{'port':port,'ipv4':addr}
def parse(self, text):
try:
astr,pstr = text.split(':')
except ValueError as e:
raise BadTypeValu(name=self.name,valu=text)
addr = ipv4int(astr)
port = int(pstr,0)
return ( addr << 16 ) | port
srv6re = re.compile('^\[([a-f0-9:]+)\]:(\d+)$')
class Srv6Type(DataType):
'''
Base type for [IPv6]:port format.
'''
subprops = (
tufo('port', ptype='inet:port'),
tufo('ipv6', ptype='inet:ipv6'),
)
def norm(self, valu):
return self.chop(valu)[0]
def chop(self, valu):
valu = valu.lower()
m = srv6re.match(valu)
if m == None:
self._raiseBadValu(valu, ex='[af::2]:80')
host,portstr = m.groups()
port = int(portstr,0)
if port > 0xffff or port < 0:
self._raiseBadValu(valu, port=port)
try:
host = ipv6norm(host)
except Exception as e:
self._raiseBadValu(valu)
valu = '[%s]:%d' % (host,port)
return valu,{'ipv6':host,'port':port}
def parse(self, text):
return self.norm(text)
def repr(self, valu):
return valu
class EmailType(DataType):
subprops = (
tufo('user',ptype='inet:user'),
tufo('fqdn',ptype='inet:fqdn'),
)
def norm(self, valu):
try:
user,fqdn = valu.split('@',1)
except ValueError as e:
self._raiseBadValu(valu)
return '%s@%s' % (user.lower(),fqdn.lower())
def parse(self, text):
return self.norm(text)
def repr(self, valu):
return valu
class UrlType(DataType):
#subprops = (
#tufo('fqdn',ptype='inet:fqdn'),
#tufo('ipv4',ptype='inet:ipv4'),
#tufo('ipv6',ptype='inet:ipv6'),
#tufo('port',ptype='inet:port'),
#)
def parse(self, text):
return self.norm(text)
def norm(self, valu):
respath = ''
resauth = ''
if valu.find('://') == -1:
raise BadTypeValu(name=self.name,valu=valu)
scheme,resloc = valu.split('://',1)
parts = resloc.split('/',1)
if len(parts) == 2:
resloc,respath = parts
if resloc.find('@') != -1:
resauth,resloc = resloc.split('@',1)
# FIXME chop sub props from resloc!
scheme = scheme.lower()
hostpart = resloc.lower()
if resauth:
hostpart = '%s@%s' % (resauth,hostpart)
return '%s://%s/%s' % (scheme,hostpart,respath)
def repr(self, valu):
return valu
class EpochType(DataType):
def norm(self, valu):
return int(valu)
def parse(self, text):
text = text.strip().lower()
text = (''.join([ c for c in text if c.isdigit() ]))[:14]
tlen = len(text)
if tlen == 4:
st = time.strptime(text, '%Y')
elif tlen == 6:
st = time.strptime(text, '%Y%m')
elif tlen == 8:
st = time.strptime(text, '%Y%m%d')
elif tlen == 10:
st = time.strptime(text, '%Y%m%d%H')
elif tlen == 12:
st = time.strptime(text, '%Y%m%d%H%M')
elif tlen == 14:
st = time.strptime(text, '%Y%m%d%H%M%S')
else:
raise Exception('Unknown time format: %s' % text)
e = datetime.datetime(1970,1,1)
d = datetime.datetime(st.tm_year, st.tm_mon, st.tm_mday)
epoch = int((d - e).total_seconds())
epoch += st.tm_hour*3600
epoch += st.tm_min*60
epoch += st.tm_sec
return epoch
def repr(self, valu):
dt = datetime.datetime(1970,1,1) + datetime.timedelta(seconds=int(valu))
return '%d/%.2d/%.2d %.2d:%.2d:%.2d' % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
class TypeLib:
'''
An extensible type library for use in cortex data models.
'''
def __init__(self):
self.types = {}
self.subtypes = []
self.addType(IntType(self,'int'))
self.addType(StrType(self,'str'))
self.addType(BoolType(self,'bool'))
self.addSubType('syn:tag','str', regex=r'^([\w]+\.)*[\w]+$', lower=1)
self.addSubType('syn:prop','str', regex=r'^([\w]+:)*[\w]+$', lower=1)
self.addSubType('syn:type','str', regex=r'^([\w]+:)*[\w]+$', lower=1)
self.addSubType('text', 'str')
self.addSubType('str:lwr', 'str', lower=1)
self.addSubType('geo:latlong', 'str', regex='^[-+]?([1-8]?\d(\.\d+)?|90(\.0+)?),\s*[-+]?(180(\.0+)?|((1[0-7]\d)|([1-9]?\d))(\.\d+)?)$')
self.addSubType('guid', 'str', regex='^[0-9a-f]{32}$', lower=1)
self.addSubType('hash:md5','str', regex='^[0-9a-f]{32}$', lower=1)
self.addSubType('hash:sha1','str', regex='^[0-9a-f]{40}$', lower=1)
self.addSubType('hash:sha256','str', regex='^[0-9a-f]{64}$', lower=1)
self.addSubType('hash:sha384','str', regex='^[0-9a-f]{96}$', lower=1)
self.addSubType('hash:sha512','str', regex='^[0-9a-f]{128}$', lower=1)
# time types
self.addType( EpochType(self,'time:epoch') )
# inet types
self.addType(IPv4Type(self,'inet:ipv4'))
self.addType(IPv6Type(self,'inet:ipv6'))
self.addType(Srv4Type(self,'inet:srv4'))
self.addType(Srv6Type(self,'inet:srv6'))
self.addSubType('inet:tcp4','inet:srv4')
self.addSubType('inet:udp4','inet:srv4')
self.addSubType('inet:tcp6','inet:srv6')
self.addSubType('inet:udp6','inet:srv6')
self.addType(UrlType(self,'inet:url'))
self.addType(EmailType(self,'inet:email'))
self.addSubType('inet:asn', 'int')
self.addSubType('inet:user','str')
self.addSubType('inet:passwd','str')
self.addSubType('inet:filepath','str')
self.addSubType('inet:fqdn','str', regex='^[a-z0-9.-_]+$', lower=1)
self.addSubType('inet:mac', 'str', regex='^([0-9a-f]{2}[:]){5}([0-9a-f]{2})$', lower=1)
self.addSubType('inet:port', 'int', min=0, max=0xffff)
def addSubType(self, name, subof, **info):
'''
Add a new type which extends from parent type's class.
Example:
tlib.addSubType('guid:org', 'guid', doc='guid for an org')
'''
if self.types.get(name) != None:
raise DupTypeName(name=name)
info['subof'] = subof
base = self.reqDataType(subof)
self.addType( base.extend(name, **info) )
self.subtypes.append( (name,info) )
def getDataType(self, name):
'''
Return the DataType subclass for the given type name.
'''
return self.types.get(name)
def reqDataType(self, name):
'''
Return a reference to the named DataType or raise NoSuchType.
'''
item = self.getDataType(name)
if item == None:
raise NoSuchType(name=name)
return item
def addType(self, item):
'''
Add a type object which extends from DataType.
class MyType(DataType):
def __init__(self):
DataType.__init__(self,'my:type')
#def repr(self, valu):
#def norm(self, valu):
#def parse(self, text):
tlib.addType( MyType() )
'''
self.types[item.name] = item
def getTypeNorm(self, name, valu):
'''
Normalize a type specific value in system mode.
Example:
fqdn = tlib.getTypeNorm('inet:fqdn','Foo.Com')
'''
return self.reqDataType(name).norm(valu)
def getTypeRepr(self, name, valu):
'''
        Return the human-readable form of the given type value.
Example:
print( tlib.getTypeRepr('inet:ipv4', ipv4addr) )
'''
return self.reqDataType(name).repr(valu)
def getTypeParse(self, name, text):
'''
Parse input text for the given type into it's system form.
Example:
ipv4 = tlib.getTypeParse('inet:ipv4','1.2.3.4')
'''
return self.reqDataType(name).parse(text)
| [
"[email protected]"
] | |
70d55931728e32b6b0ada6a287f875e2e79e7fb5 | ccb9425e4655cee0c06f3759e21e8477eb111d8f | /django_eveonline_connector/migrations/0010_auto_20191211_1514.py | a0b18ff1beebbef596039a7f2c63880f6b0a313f | [
"MIT"
] | permissive | KryptedGaming/django-eveonline-connector | 38fac86666598e0e9b6f0e330806e54678363a88 | 95fa146f4fcdf6bce84548b5cac1e5bf09cd72a0 | refs/heads/master | 2021-06-12T10:02:26.495615 | 2021-05-03T17:24:05 | 2021-05-03T17:24:05 | 201,830,836 | 3 | 2 | MIT | 2021-06-10T20:39:05 | 2019-08-12T00:34:21 | Python | UTF-8 | Python | false | false | 530 | py | # Generated by Django 2.2.8 on 2019-12-11 15:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('django_eveonline_connector', '0009_auto_20191210_1538'),
]
operations = [
migrations.AlterField(
model_name='evecharacter',
name='token',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='django_eveonline_connector.EveToken'),
),
]
| [
"[email protected]"
] | |
e8e1ea746c2d2d55c761d003d71ad43f43480823 | 94180b918cc2c590c5868113d3e604ade34473b3 | /jayd3e/models.py | 44f37beec4764775f20a77652560387d2d785d77 | [] | no_license | jayd3e-archive/jayd3e | beb9daf007915c96645512884387054adb247a51 | 88a0d91b7aaa7907260d802935b198892623934e | refs/heads/master | 2020-06-07T21:51:21.056787 | 2013-04-28T18:43:10 | 2013-04-28T18:43:10 | 1,386,947 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | from sqlalchemy import (
Column,
String,
Integer,
Date,
DateTime
)
from sqlalchemy.ext.declarative import declarative_base
class BaseClass(object):
id = Column(Integer, primary_key=True)
Base = declarative_base(cls=BaseClass)
def initializeDb(engine):
Base.metadata.bind = engine
class Post(Base):
__tablename__ = 'posts'
title = Column(String(50))
body = Column(String(2000))
date = Column(Date)
created = Column(DateTime)
change_time = Column(DateTime)
def __repr__(self):
return "<Post('%s')>" % (self.id)
| [
"[email protected]"
] | |
fd6f2e143779b6958f7dd6418992c04c7fb590d1 | 05cd0bee988d6d02841b23689517304defe7d00f | /goaway/emitter.py | 36188322a55e67ed3f97af2abc659314bd67f188 | [] | no_license | podhmo/goaway | cd41043c6386f58958a99f26009ef6f873716e23 | f870b4833a3aaf303c7ce161b78096e0b33e3c11 | refs/heads/master | 2021-10-11T11:05:11.639598 | 2021-10-06T01:57:56 | 2021-10-06T01:57:56 | 90,339,657 | 0 | 1 | null | 2021-10-06T01:52:23 | 2017-05-05T05:20:58 | Python | UTF-8 | Python | false | false | 697 | py | import os.path
import logging
logger = logging.getLogger(__name__)
class Emitter:
def __init__(self, repository):
self.repository = repository
def emit_package(self, package, d=None, onemit=None):
return dumptree(self.repository.writer, package, d=d, onemit=onemit)
emit = emit_package
def dumptree(writer, package, d=None, onemit=None):
d = d or package.filepath
os.makedirs(d, exist_ok=True)
for f in package.files.values():
fpath = os.path.join(d, f.name)
with open(fpath, "w") as wf:
logger.info("write: %s", fpath)
wf.write(str(writer.write(f)))
if onemit is not None:
onemit(f, fpath)
| [
"[email protected]"
] | |
4fe1ebc7236a6d0e7e5805f2cd720f6a72951b89 | e1cf0e9941e72d06870baa63c792f1123f325762 | /classify/english_classify/SST_FlyAI/processor.py | c70e6453c71c1d7febd4757ea8ac1d301aa1819b | [] | no_license | yphacker/flyai_nlp | 1ab79be510d82fb0e9bc7d5d823c3fbaf9bf2ce5 | 78a8cd8680190dacc053993fe4a00d2391a62408 | refs/heads/master | 2020-07-16T02:22:03.338890 | 2020-01-02T12:52:14 | 2020-01-02T12:52:14 | 205,699,001 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,263 | py | # -*- coding: utf-8 -*
import os
from flyai.processor.base import Base
import config
import bert.tokenization as tokenization
from bert.run_classifier import convert_single_example_simple
# class Processor(Base):
# def __init__(self):
# self.token = None
#
# def input_x(self, sentence):
# '''
# 参数为csv中作为输入x的一条数据,该方法会被Dataset多次调用
# '''
# if self.token is None:
# bert_vocab_file = os.path.join(DATA_PATH, "model", "uncased_L-12_H-768_A-12", 'vocab.txt')
# self.token = tokenization.CharTokenizer(vocab_file=bert_vocab_file)
# word_ids, word_mask, word_segment_ids = convert_single_example_simple(max_seq_length=256, tokenizer=self.token,
# text_a=sentence)
# return word_ids, word_mask, word_segment_ids
#
# def input_y(self, label):
# '''
#         One row of csv data used as input y; this method is called by Dataset many times
# '''
# return [label]
#
# def output_y(self, index):
# '''
# 验证时使用,把模型输出的y转为对应的结果
# '''
#
# if index >= 0.5:
# return 1
# return 0
class Processor(Base):
def __init__(self):
self.token = None
def input_x(self, sentence):
'''
        One row of csv data used as input x; this method is called by Dataset many times.
'''
if self.token is None:
bert_vocab_file = os.path.join(config.DATA_PATH, "model", "uncased_L-24_H-1024_A-16", 'vocab.txt')
self.token = tokenization.FullTokenizer(vocab_file=bert_vocab_file)
word_ids, word_mask, word_segment_ids = \
convert_single_example_simple(config.max_seq_length, tokenizer=self.token, text_a=sentence)
return word_ids, word_mask, word_segment_ids
def input_y(self, label):
'''
        One row of csv data used as input y; this method is called by Dataset many times.
'''
return label
def output_y(self, data):
'''
        Used during evaluation to convert the model's output y into the corresponding result.
'''
return data[0]
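# A minimal invocation sketch (assumes the BERT vocab file is present under
# config.DATA_PATH as laid out above):
#
#   p = Processor()
#   word_ids, word_mask, word_segment_ids = p.input_x("a great movie")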
| [
"[email protected]"
] | |
15f3f035f2e8a0fe2a55846ba021bfa4732d8946 | b6d8049568e8068422bc4ac2b957972dc1ee0ab7 | /ui_extensions/cloudendure/__init__.py | 01817f14f2dc5e1ff60b1d569706c52ba539c117 | [
"Apache-2.0"
] | permissive | CloudBoltSoftware/cloudbolt-forge | a6dffd52bd074ad48a61527502fcddf8113508ff | a5584a84841be49bea69df506a91d18cb3f215d8 | refs/heads/master | 2023-08-11T08:08:07.730807 | 2023-08-10T11:40:07 | 2023-08-10T11:40:07 | 39,207,603 | 37 | 44 | Apache-2.0 | 2023-09-14T20:06:44 | 2015-07-16T16:18:02 | Python | UTF-8 | Python | false | false | 300 | py | """
Provides features to support the CloudEndure server migration integration.
This integration helps with creating a CloudEndure project, adding a VM
to a project, and starting replication. It also enables users to start
migration to AWS and to install the agent on servers that do not yet
have one.
""" | [
"[email protected]"
] | |
6feb5f7e88ae1c948ac192a971e99bcfe1b13bd8 | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-CoreServices/PyObjCTest/test_lssharedfilelist.py | 9624ae2b03efaa607e39d773668c2e0fcb634b77 | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,812 | py |
from PyObjCTools.TestSupport import *
import CoreServices
import os
class TestLSSharedFileList (TestCase):
def testTypes(self):
self.assertIsCFType(CoreServices.LSSharedFileListRef)
self.assertIsCFType(CoreServices.LSSharedFileListItemRef)
@min_os_level('10.5')
def testConstants10_5(self):
self.assertIsInstance(CoreServices.kLSSharedFileListFavoriteVolumes, unicode)
self.assertIsInstance(CoreServices.kLSSharedFileListFavoriteItems, unicode)
self.assertIsInstance(CoreServices.kLSSharedFileListRecentApplicationItems, unicode)
self.assertIsInstance(CoreServices.kLSSharedFileListRecentDocumentItems, unicode)
self.assertIsInstance(CoreServices.kLSSharedFileListRecentServerItems, unicode)
self.assertIsInstance(CoreServices.kLSSharedFileListSessionLoginItems, unicode)
self.assertIsInstance(CoreServices.kLSSharedFileListGlobalLoginItems, unicode)
self.assertIsInstance(CoreServices.kLSSharedFileListRecentItemsMaxAmount, unicode)
self.assertIsInstance(CoreServices.kLSSharedFileListVolumesComputerVisible, unicode)
self.assertIsInstance(CoreServices.kLSSharedFileListVolumesIDiskVisible, unicode)
self.assertIsInstance(CoreServices.kLSSharedFileListVolumesNetworkVisible, unicode)
self.assertIsInstance(CoreServices.kLSSharedFileListItemHidden, unicode)
@min_os_level('10.6')
def testConstants10_6(self):
self.assertIsInstance(CoreServices.kLSSharedFileListLoginItemHidden, unicode)
@min_os_level('10.5')
def testMagicConstants10_5(self):
self.assertIsInstance(CoreServices.kLSSharedFileListItemBeforeFirst, CoreServices.LSSharedFileListItemRef)
self.assertIsInstance(CoreServices.kLSSharedFileListItemLast, CoreServices.LSSharedFileListItemRef)
def testConstants(self):
self.assertEqual(CoreServices.kLSSharedFileListNoUserInteraction, 1)
self.assertEqual(CoreServices.kLSSharedFileListDoNotMountVolumes, 2)
def testFunctions(self):
self.assertIsInstance(CoreServices.LSSharedFileListGetTypeID(), (int, long))
self.assertIsInstance(CoreServices.LSSharedFileListItemGetTypeID(), (int, long))
self.assertResultIsCFRetained(CoreServices.LSSharedFileListCreate)
lst = CoreServices.LSSharedFileListCreate(None, CoreServices.kLSSharedFileListRecentDocumentItems, None)
self.assertIsInstance(lst, CoreServices.LSSharedFileListRef)
rl = CoreServices.CFRunLoopGetCurrent()
self.assertArgIsFunction(CoreServices.LSSharedFileListAddObserver, 3, b'v^{OpaqueLSSharedFileListRef=}^v', True)
self.assertArgHasType(CoreServices.LSSharedFileListAddObserver, 4, b'^v')
@objc.callbackFor(CoreServices.LSSharedFileListAddObserver)
def callback(lst, ctxt):
pass
CoreServices.LSSharedFileListAddObserver(lst, rl, CoreServices.kCFRunLoopDefaultMode, callback, None)
CoreServices.LSSharedFileListRemoveObserver(lst, rl, CoreServices.kCFRunLoopDefaultMode, callback, None)
v = CoreServices.LSSharedFileListGetSeedValue(lst)
self.assertIsInstance(v, (int, long))
self.assertResultIsCFRetained(CoreServices.LSSharedFileListCopyProperty)
self.assertResultHasType(CoreServices.LSSharedFileListCopyProperty, b'@')
v = CoreServices.LSSharedFileListCopyProperty(lst, b"pyobjc.name".decode('latin1'))
v = CoreServices.LSSharedFileListSetProperty(lst, b"pyobjc.name".decode('latin1'), b"value".decode('latin1'))
self.assertIsInstance(v, (int, long))
v = CoreServices.LSSharedFileListCopyProperty(lst, b"pyobjc.name".decode('latin1'))
self.assertEqual(v, b"value".decode('latin1'))
self.assertArgIsOut(CoreServices.LSSharedFileListCopySnapshot, 1)
v, seed = CoreServices.LSSharedFileListCopySnapshot(lst, None)
self.assertIsInstance(v, CoreServices.CFArrayRef)
self.assertIsInstance(seed, (int,long))
self.assertResultIsCFRetained(CoreServices.LSSharedFileListInsertItemURL)
url = CoreServices.CFURLCreateWithString(None, "file://" + os.path.expanduser('~'), None)
title = b"PyObjC.Test".decode("latin1")
item = CoreServices.LSSharedFileListInsertItemFSRef(lst, CoreServices.kLSSharedFileListItemLast, title, None, objc.FSRef.from_pathname(os.path.expanduser('~')), None, None)
self.assertIsInstance(item, CoreServices.LSSharedFileListItemRef)
item = CoreServices.LSSharedFileListInsertItemURL(lst, CoreServices.kLSSharedFileListItemLast, title, None, url, None, None)
self.assertIsInstance(item, CoreServices.LSSharedFileListItemRef)
v = CoreServices.LSSharedFileListItemGetID(item)
self.assertIsInstance(v, (int, long))
v = CoreServices.LSSharedFileListItemCopyIconRef(item)
if v is not None:
self.assertIsInstance(v, CoreServices.IconRef)
self.assertResultIsCFRetained(CoreServices.LSSharedFileListItemCopyDisplayName)
v = CoreServices.LSSharedFileListItemCopyDisplayName(item)
self.assertIsInstance(v, unicode)
self.assertArgIsOut(CoreServices.LSSharedFileListItemResolve, 2)
self.assertArgIsOut(CoreServices.LSSharedFileListItemResolve, 3)
self.assertArgIsCFRetained(CoreServices.LSSharedFileListItemResolve, 2)
v, url, ref = CoreServices.LSSharedFileListItemResolve(item, 0, None, objc.NULL)
self.assertIsInstance(v, (int, long))
if url is not None:
self.assertIsInstance(url, CoreServices.CFURLRef)
v = CoreServices.LSSharedFileListItemSetProperty(item, b"pyobjc.name".decode('latin1'), b"pyobjc.test".decode('latin1'))
self.assertIsInstance(v, (int, long))
self.assertResultIsCFRetained(CoreServices.LSSharedFileListItemCopyProperty)
v = CoreServices.LSSharedFileListItemCopyProperty(item, b"pyobjc.name".decode('latin1'))
if v is not None:
self.assertEqual(v, "pyobjc.test")
v = CoreServices.LSSharedFileListItemMove(lst, item, CoreServices.kLSSharedFileListItemBeforeFirst)
self.assertIsInstance(v, (int, long))
v = CoreServices.LSSharedFileListItemRemove(lst, item)
self.assertIsInstance(v, (int, long))
CoreServices.LSSharedFileListRemoveAllItems
@expectedFailure
def testMissing(self):
# Needs more infrastructure
self.fail('LSSharedFileListSetAuthorization')
# FSRef suckage
self.fail('LSSharedFileListItemRef')
@min_os_level('10.10')
def testFunctions10_10(self):
self.assertResultIsCFRetained(CoreServices.LSSharedFileListItemCopyResolvedURL)
self.assertArgIsOut(CoreServices.LSSharedFileListItemCopyResolvedURL, 2)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
0c488ceeb99828d94f18cbc4a70205200c17ac34 | a3cc7286d4a319cb76f3a44a593c4a18e5ddc104 | /lib/googlecloudsdk/third_party/appengine/api/taskqueue/taskqueue_service_pb.py | 22507cc53681e81ea9a444cc13ea305778279d52 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jordanistan/Google-Cloud-SDK | f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | refs/heads/master | 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 | NOASSERTION | 2023-08-22T01:12:24 | 2018-03-28T02:31:19 | Python | UTF-8 | Python | false | false | 262,953 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: apphosting/api/taskqueue/taskqueue_service.proto
from googlecloudsdk.third_party.appengine.proto import ProtocolBuffer
import abc
import array
import thread
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from googlecloudsdk.third_party.appengine.datastore.datastore_v3_pb import *
import googlecloudsdk.third_party.appengine.datastore.datastore_v3_pb
googlecloudsdk_dot_third_party_dot_appengine_dot_datastore_dot_datastore__v3__pb = __import__('googlecloudsdk.third_party.appengine.datastore.datastore_v3_pb', {}, {}, [''])
from googlecloudsdk.third_party.appengine.proto.message_set import MessageSet
class TaskQueueServiceError(ProtocolBuffer.ProtocolMessage):
# ErrorCode values
OK = 0
UNKNOWN_QUEUE = 1
TRANSIENT_ERROR = 2
INTERNAL_ERROR = 3
TASK_TOO_LARGE = 4
INVALID_TASK_NAME = 5
INVALID_QUEUE_NAME = 6
INVALID_URL = 7
INVALID_QUEUE_RATE = 8
PERMISSION_DENIED = 9
TASK_ALREADY_EXISTS = 10
TOMBSTONED_TASK = 11
INVALID_ETA = 12
INVALID_REQUEST = 13
UNKNOWN_TASK = 14
TOMBSTONED_QUEUE = 15
DUPLICATE_TASK_NAME = 16
SKIPPED = 17
TOO_MANY_TASKS = 18
INVALID_PAYLOAD = 19
INVALID_RETRY_PARAMETERS = 20
INVALID_QUEUE_MODE = 21
ACL_LOOKUP_ERROR = 22
TRANSACTIONAL_REQUEST_TOO_LARGE = 23
INCORRECT_CREATOR_NAME = 24
TASK_LEASE_EXPIRED = 25
QUEUE_PAUSED = 26
INVALID_TAG = 27
DATASTORE_ERROR = 10000
_ErrorCode_NAMES = {
0: "OK",
1: "UNKNOWN_QUEUE",
2: "TRANSIENT_ERROR",
3: "INTERNAL_ERROR",
4: "TASK_TOO_LARGE",
5: "INVALID_TASK_NAME",
6: "INVALID_QUEUE_NAME",
7: "INVALID_URL",
8: "INVALID_QUEUE_RATE",
9: "PERMISSION_DENIED",
10: "TASK_ALREADY_EXISTS",
11: "TOMBSTONED_TASK",
12: "INVALID_ETA",
13: "INVALID_REQUEST",
14: "UNKNOWN_TASK",
15: "TOMBSTONED_QUEUE",
16: "DUPLICATE_TASK_NAME",
17: "SKIPPED",
18: "TOO_MANY_TASKS",
19: "INVALID_PAYLOAD",
20: "INVALID_RETRY_PARAMETERS",
21: "INVALID_QUEUE_MODE",
22: "ACL_LOOKUP_ERROR",
23: "TRANSACTIONAL_REQUEST_TOO_LARGE",
24: "INCORRECT_CREATOR_NAME",
25: "TASK_LEASE_EXPIRED",
26: "QUEUE_PAUSED",
27: "INVALID_TAG",
10000: "DATASTORE_ERROR",
}
def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
ErrorCode_Name = classmethod(ErrorCode_Name)
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueServiceError'
class TaskQueueRetryParameters(ProtocolBuffer.ProtocolMessage):
has_retry_limit_ = 0
retry_limit_ = 0
has_age_limit_sec_ = 0
age_limit_sec_ = 0
has_min_backoff_sec_ = 0
min_backoff_sec_ = 0.1
has_max_backoff_sec_ = 0
max_backoff_sec_ = 3600.0
has_max_doublings_ = 0
max_doublings_ = 16
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def retry_limit(self): return self.retry_limit_
def set_retry_limit(self, x):
self.has_retry_limit_ = 1
self.retry_limit_ = x
def clear_retry_limit(self):
if self.has_retry_limit_:
self.has_retry_limit_ = 0
self.retry_limit_ = 0
def has_retry_limit(self): return self.has_retry_limit_
def age_limit_sec(self): return self.age_limit_sec_
def set_age_limit_sec(self, x):
self.has_age_limit_sec_ = 1
self.age_limit_sec_ = x
def clear_age_limit_sec(self):
if self.has_age_limit_sec_:
self.has_age_limit_sec_ = 0
self.age_limit_sec_ = 0
def has_age_limit_sec(self): return self.has_age_limit_sec_
def min_backoff_sec(self): return self.min_backoff_sec_
def set_min_backoff_sec(self, x):
self.has_min_backoff_sec_ = 1
self.min_backoff_sec_ = x
def clear_min_backoff_sec(self):
if self.has_min_backoff_sec_:
self.has_min_backoff_sec_ = 0
self.min_backoff_sec_ = 0.1
def has_min_backoff_sec(self): return self.has_min_backoff_sec_
def max_backoff_sec(self): return self.max_backoff_sec_
def set_max_backoff_sec(self, x):
self.has_max_backoff_sec_ = 1
self.max_backoff_sec_ = x
def clear_max_backoff_sec(self):
if self.has_max_backoff_sec_:
self.has_max_backoff_sec_ = 0
self.max_backoff_sec_ = 3600.0
def has_max_backoff_sec(self): return self.has_max_backoff_sec_
def max_doublings(self): return self.max_doublings_
def set_max_doublings(self, x):
self.has_max_doublings_ = 1
self.max_doublings_ = x
def clear_max_doublings(self):
if self.has_max_doublings_:
self.has_max_doublings_ = 0
self.max_doublings_ = 16
def has_max_doublings(self): return self.has_max_doublings_
def MergeFrom(self, x):
assert x is not self
if (x.has_retry_limit()): self.set_retry_limit(x.retry_limit())
if (x.has_age_limit_sec()): self.set_age_limit_sec(x.age_limit_sec())
if (x.has_min_backoff_sec()): self.set_min_backoff_sec(x.min_backoff_sec())
if (x.has_max_backoff_sec()): self.set_max_backoff_sec(x.max_backoff_sec())
if (x.has_max_doublings()): self.set_max_doublings(x.max_doublings())
def Equals(self, x):
if x is self: return 1
if self.has_retry_limit_ != x.has_retry_limit_: return 0
if self.has_retry_limit_ and self.retry_limit_ != x.retry_limit_: return 0
if self.has_age_limit_sec_ != x.has_age_limit_sec_: return 0
if self.has_age_limit_sec_ and self.age_limit_sec_ != x.age_limit_sec_: return 0
if self.has_min_backoff_sec_ != x.has_min_backoff_sec_: return 0
if self.has_min_backoff_sec_ and self.min_backoff_sec_ != x.min_backoff_sec_: return 0
if self.has_max_backoff_sec_ != x.has_max_backoff_sec_: return 0
if self.has_max_backoff_sec_ and self.max_backoff_sec_ != x.max_backoff_sec_: return 0
if self.has_max_doublings_ != x.has_max_doublings_: return 0
if self.has_max_doublings_ and self.max_doublings_ != x.max_doublings_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_retry_limit_): n += 1 + self.lengthVarInt64(self.retry_limit_)
if (self.has_age_limit_sec_): n += 1 + self.lengthVarInt64(self.age_limit_sec_)
if (self.has_min_backoff_sec_): n += 9
if (self.has_max_backoff_sec_): n += 9
if (self.has_max_doublings_): n += 1 + self.lengthVarInt64(self.max_doublings_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_retry_limit_): n += 1 + self.lengthVarInt64(self.retry_limit_)
if (self.has_age_limit_sec_): n += 1 + self.lengthVarInt64(self.age_limit_sec_)
if (self.has_min_backoff_sec_): n += 9
if (self.has_max_backoff_sec_): n += 9
if (self.has_max_doublings_): n += 1 + self.lengthVarInt64(self.max_doublings_)
return n
def Clear(self):
self.clear_retry_limit()
self.clear_age_limit_sec()
self.clear_min_backoff_sec()
self.clear_max_backoff_sec()
self.clear_max_doublings()
def OutputUnchecked(self, out):
if (self.has_retry_limit_):
out.putVarInt32(8)
out.putVarInt32(self.retry_limit_)
if (self.has_age_limit_sec_):
out.putVarInt32(16)
out.putVarInt64(self.age_limit_sec_)
if (self.has_min_backoff_sec_):
out.putVarInt32(25)
out.putDouble(self.min_backoff_sec_)
if (self.has_max_backoff_sec_):
out.putVarInt32(33)
out.putDouble(self.max_backoff_sec_)
if (self.has_max_doublings_):
out.putVarInt32(40)
out.putVarInt32(self.max_doublings_)
def OutputPartial(self, out):
if (self.has_retry_limit_):
out.putVarInt32(8)
out.putVarInt32(self.retry_limit_)
if (self.has_age_limit_sec_):
out.putVarInt32(16)
out.putVarInt64(self.age_limit_sec_)
if (self.has_min_backoff_sec_):
out.putVarInt32(25)
out.putDouble(self.min_backoff_sec_)
if (self.has_max_backoff_sec_):
out.putVarInt32(33)
out.putDouble(self.max_backoff_sec_)
if (self.has_max_doublings_):
out.putVarInt32(40)
out.putVarInt32(self.max_doublings_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_retry_limit(d.getVarInt32())
continue
if tt == 16:
self.set_age_limit_sec(d.getVarInt64())
continue
if tt == 25:
self.set_min_backoff_sec(d.getDouble())
continue
if tt == 33:
self.set_max_backoff_sec(d.getDouble())
continue
if tt == 40:
self.set_max_doublings(d.getVarInt32())
continue
      # tag 0 is special: it's used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_retry_limit_: res+=prefix+("retry_limit: %s\n" % self.DebugFormatInt32(self.retry_limit_))
if self.has_age_limit_sec_: res+=prefix+("age_limit_sec: %s\n" % self.DebugFormatInt64(self.age_limit_sec_))
if self.has_min_backoff_sec_: res+=prefix+("min_backoff_sec: %s\n" % self.DebugFormat(self.min_backoff_sec_))
if self.has_max_backoff_sec_: res+=prefix+("max_backoff_sec: %s\n" % self.DebugFormat(self.max_backoff_sec_))
if self.has_max_doublings_: res+=prefix+("max_doublings: %s\n" % self.DebugFormatInt32(self.max_doublings_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kretry_limit = 1
kage_limit_sec = 2
kmin_backoff_sec = 3
kmax_backoff_sec = 4
kmax_doublings = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "retry_limit",
2: "age_limit_sec",
3: "min_backoff_sec",
4: "max_backoff_sec",
5: "max_doublings",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.DOUBLE,
4: ProtocolBuffer.Encoder.DOUBLE,
5: ProtocolBuffer.Encoder.NUMERIC,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueRetryParameters'
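
# Illustrative sketch (not part of the generated definitions): populating
# retry parameters through the setters defined above. Every field here is
# optional, so IsInitialized() always succeeds; the concrete values are
# hypothetical.
#
#   params = TaskQueueRetryParameters()
#   params.set_retry_limit(5)          # varint field 1, wire tag 8
#   params.set_age_limit_sec(86400)    # varint field 2, wire tag 16
#   params.set_min_backoff_sec(0.5)    # double field 3, wire tag 25
#   params.set_max_backoff_sec(300.0)  # double field 4, wire tag 33
#   assert params.IsInitialized()
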
class TaskQueueAcl(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.user_email_ = []
self.writer_email_ = []
if contents is not None: self.MergeFromString(contents)
def user_email_size(self): return len(self.user_email_)
def user_email_list(self): return self.user_email_
def user_email(self, i):
return self.user_email_[i]
def set_user_email(self, i, x):
self.user_email_[i] = x
def add_user_email(self, x):
self.user_email_.append(x)
def clear_user_email(self):
self.user_email_ = []
def writer_email_size(self): return len(self.writer_email_)
def writer_email_list(self): return self.writer_email_
def writer_email(self, i):
return self.writer_email_[i]
def set_writer_email(self, i, x):
self.writer_email_[i] = x
def add_writer_email(self, x):
self.writer_email_.append(x)
def clear_writer_email(self):
self.writer_email_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.user_email_size()): self.add_user_email(x.user_email(i))
for i in xrange(x.writer_email_size()): self.add_writer_email(x.writer_email(i))
def Equals(self, x):
if x is self: return 1
if len(self.user_email_) != len(x.user_email_): return 0
for e1, e2 in zip(self.user_email_, x.user_email_):
if e1 != e2: return 0
if len(self.writer_email_) != len(x.writer_email_): return 0
for e1, e2 in zip(self.writer_email_, x.writer_email_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.user_email_)
for i in xrange(len(self.user_email_)): n += self.lengthString(len(self.user_email_[i]))
n += 1 * len(self.writer_email_)
for i in xrange(len(self.writer_email_)): n += self.lengthString(len(self.writer_email_[i]))
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.user_email_)
for i in xrange(len(self.user_email_)): n += self.lengthString(len(self.user_email_[i]))
n += 1 * len(self.writer_email_)
for i in xrange(len(self.writer_email_)): n += self.lengthString(len(self.writer_email_[i]))
return n
def Clear(self):
self.clear_user_email()
self.clear_writer_email()
def OutputUnchecked(self, out):
for i in xrange(len(self.user_email_)):
out.putVarInt32(10)
out.putPrefixedString(self.user_email_[i])
for i in xrange(len(self.writer_email_)):
out.putVarInt32(18)
out.putPrefixedString(self.writer_email_[i])
def OutputPartial(self, out):
for i in xrange(len(self.user_email_)):
out.putVarInt32(10)
out.putPrefixedString(self.user_email_[i])
for i in xrange(len(self.writer_email_)):
out.putVarInt32(18)
out.putPrefixedString(self.writer_email_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.add_user_email(d.getPrefixedString())
continue
if tt == 18:
self.add_writer_email(d.getPrefixedString())
continue
      # tag 0 is special: it's used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.user_email_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("user_email%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
cnt=0
for e in self.writer_email_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("writer_email%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kuser_email = 1
kwriter_email = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "user_email",
2: "writer_email",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueAcl'
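
# Sketch of the repeated-field accessors generated above; the e-mail
# addresses are placeholders.
#
#   acl = TaskQueueAcl()
#   acl.add_user_email('user@example.com')
#   acl.add_writer_email('writer@example.com')
#   assert acl.user_email_size() == 1
#   assert acl.user_email(0) == 'user@example.com'
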
class TaskQueueHttpHeader(ProtocolBuffer.ProtocolMessage):
has_key_ = 0
key_ = ""
has_value_ = 0
value_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def key(self): return self.key_
def set_key(self, x):
self.has_key_ = 1
self.key_ = x
def clear_key(self):
if self.has_key_:
self.has_key_ = 0
self.key_ = ""
def has_key(self): return self.has_key_
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = ""
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_key()): self.set_key(x.key())
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: key not set.')
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.key_))
n += self.lengthString(len(self.value_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_key_):
n += 1
n += self.lengthString(len(self.key_))
if (self.has_value_):
n += 1
n += self.lengthString(len(self.value_))
return n
def Clear(self):
self.clear_key()
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.key_)
out.putVarInt32(18)
out.putPrefixedString(self.value_)
def OutputPartial(self, out):
if (self.has_key_):
out.putVarInt32(10)
out.putPrefixedString(self.key_)
if (self.has_value_):
out.putVarInt32(18)
out.putPrefixedString(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_key(d.getPrefixedString())
continue
if tt == 18:
self.set_value(d.getPrefixedString())
continue
      # tag 0 is special: it's used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_key_: res+=prefix+("key: %s\n" % self.DebugFormatString(self.key_))
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatString(self.value_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kkey = 1
kvalue = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "key",
2: "value",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueHttpHeader'
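
# A note on the wire tags used above: a protocol-buffer tag is
# (field_number << 3) | wire_type, so key (field 1, length-delimited
# type 2) travels under tag 10 and value (field 2) under tag 18. Both
# fields are required, which is why ByteSize() charges a flat 2 bytes
# for the tags while ByteSizePartial() only counts fields that are set.
# A minimal sketch (the header name is illustrative):
#
#   header = TaskQueueHttpHeader()
#   header.set_key('X-AppEngine-TaskName')
#   header.set_value('task-1')
#   assert header.IsInitialized()
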
class TaskQueueMode(ProtocolBuffer.ProtocolMessage):
# Mode values
PUSH = 0
PULL = 1
_Mode_NAMES = {
0: "PUSH",
1: "PULL",
}
def Mode_Name(cls, x): return cls._Mode_NAMES.get(x, "")
Mode_Name = classmethod(Mode_Name)
def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
      # tag 0 is special: it's used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueMode'
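
# TaskQueueMode carries no fields of its own; it only scopes the
# PUSH/PULL enum. A sketch of how the values are meant to be consumed
# (the pairing with TaskQueueAddRequest.set_mode is inferred from the
# mode field declared on that message):
#
#   assert TaskQueueMode.Mode_Name(TaskQueueMode.PULL) == 'PULL'
#   # add_request.set_mode(TaskQueueMode.PULL)
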
class TaskQueueAddRequest_Header(ProtocolBuffer.ProtocolMessage):
has_key_ = 0
key_ = ""
has_value_ = 0
value_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def key(self): return self.key_
def set_key(self, x):
self.has_key_ = 1
self.key_ = x
def clear_key(self):
if self.has_key_:
self.has_key_ = 0
self.key_ = ""
def has_key(self): return self.has_key_
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = ""
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_key()): self.set_key(x.key())
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: key not set.')
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.key_))
n += self.lengthString(len(self.value_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_key_):
n += 1
n += self.lengthString(len(self.key_))
if (self.has_value_):
n += 1
n += self.lengthString(len(self.value_))
return n
def Clear(self):
self.clear_key()
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(58)
out.putPrefixedString(self.key_)
out.putVarInt32(66)
out.putPrefixedString(self.value_)
def OutputPartial(self, out):
if (self.has_key_):
out.putVarInt32(58)
out.putPrefixedString(self.key_)
if (self.has_value_):
out.putVarInt32(66)
out.putPrefixedString(self.value_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 52: break
if tt == 58:
self.set_key(d.getPrefixedString())
continue
if tt == 66:
self.set_value(d.getPrefixedString())
continue
      # tag 0 is special: it's used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_key_: res+=prefix+("key: %s\n" % self.DebugFormatString(self.key_))
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatString(self.value_))
return res
class TaskQueueAddRequest_CronTimetable(ProtocolBuffer.ProtocolMessage):
has_schedule_ = 0
schedule_ = ""
has_timezone_ = 0
timezone_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def schedule(self): return self.schedule_
def set_schedule(self, x):
self.has_schedule_ = 1
self.schedule_ = x
def clear_schedule(self):
if self.has_schedule_:
self.has_schedule_ = 0
self.schedule_ = ""
def has_schedule(self): return self.has_schedule_
def timezone(self): return self.timezone_
def set_timezone(self, x):
self.has_timezone_ = 1
self.timezone_ = x
def clear_timezone(self):
if self.has_timezone_:
self.has_timezone_ = 0
self.timezone_ = ""
def has_timezone(self): return self.has_timezone_
def MergeFrom(self, x):
assert x is not self
if (x.has_schedule()): self.set_schedule(x.schedule())
if (x.has_timezone()): self.set_timezone(x.timezone())
def Equals(self, x):
if x is self: return 1
if self.has_schedule_ != x.has_schedule_: return 0
if self.has_schedule_ and self.schedule_ != x.schedule_: return 0
if self.has_timezone_ != x.has_timezone_: return 0
if self.has_timezone_ and self.timezone_ != x.timezone_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_schedule_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: schedule not set.')
if (not self.has_timezone_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: timezone not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.schedule_))
n += self.lengthString(len(self.timezone_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_schedule_):
n += 1
n += self.lengthString(len(self.schedule_))
if (self.has_timezone_):
n += 1
n += self.lengthString(len(self.timezone_))
return n
def Clear(self):
self.clear_schedule()
self.clear_timezone()
def OutputUnchecked(self, out):
out.putVarInt32(106)
out.putPrefixedString(self.schedule_)
out.putVarInt32(114)
out.putPrefixedString(self.timezone_)
def OutputPartial(self, out):
if (self.has_schedule_):
out.putVarInt32(106)
out.putPrefixedString(self.schedule_)
if (self.has_timezone_):
out.putVarInt32(114)
out.putPrefixedString(self.timezone_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 100: break
if tt == 106:
self.set_schedule(d.getPrefixedString())
continue
if tt == 114:
self.set_timezone(d.getPrefixedString())
continue
      # tag 0 is special: it's used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_schedule_: res+=prefix+("schedule: %s\n" % self.DebugFormatString(self.schedule_))
if self.has_timezone_: res+=prefix+("timezone: %s\n" % self.DebugFormatString(self.timezone_))
return res
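
# The two nested messages above are encoded as protocol-buffer *groups*
# rather than length-prefixed submessages: Header is field 6, so its
# contents are bracketed by start tag 51 ((6 << 3) | 3) and end tag 52
# ((6 << 3) | 4); CronTimetable is field 12, bracketed by tags 99 and
# 100. That is why their TryMerge loops break on the end-group tag
# instead of reading a length prefix.
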
class TaskQueueAddRequest(ProtocolBuffer.ProtocolMessage):
# RequestMethod values
GET = 1
POST = 2
HEAD = 3
PUT = 4
DELETE = 5
_RequestMethod_NAMES = {
1: "GET",
2: "POST",
3: "HEAD",
4: "PUT",
5: "DELETE",
}
def RequestMethod_Name(cls, x): return cls._RequestMethod_NAMES.get(x, "")
RequestMethod_Name = classmethod(RequestMethod_Name)
has_queue_name_ = 0
queue_name_ = ""
has_task_name_ = 0
task_name_ = ""
has_eta_usec_ = 0
eta_usec_ = 0
has_method_ = 0
method_ = 2
has_url_ = 0
url_ = ""
has_body_ = 0
body_ = ""
has_transaction_ = 0
transaction_ = None
has_datastore_transaction_ = 0
datastore_transaction_ = ""
has_app_id_ = 0
app_id_ = ""
has_crontimetable_ = 0
crontimetable_ = None
has_description_ = 0
description_ = ""
has_payload_ = 0
payload_ = None
has_retry_parameters_ = 0
retry_parameters_ = None
has_mode_ = 0
mode_ = 0
has_tag_ = 0
tag_ = ""
has_cron_retry_parameters_ = 0
cron_retry_parameters_ = None
def __init__(self, contents=None):
self.header_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def queue_name(self): return self.queue_name_
def set_queue_name(self, x):
self.has_queue_name_ = 1
self.queue_name_ = x
def clear_queue_name(self):
if self.has_queue_name_:
self.has_queue_name_ = 0
self.queue_name_ = ""
def has_queue_name(self): return self.has_queue_name_
def task_name(self): return self.task_name_
def set_task_name(self, x):
self.has_task_name_ = 1
self.task_name_ = x
def clear_task_name(self):
if self.has_task_name_:
self.has_task_name_ = 0
self.task_name_ = ""
def has_task_name(self): return self.has_task_name_
def eta_usec(self): return self.eta_usec_
def set_eta_usec(self, x):
self.has_eta_usec_ = 1
self.eta_usec_ = x
def clear_eta_usec(self):
if self.has_eta_usec_:
self.has_eta_usec_ = 0
self.eta_usec_ = 0
def has_eta_usec(self): return self.has_eta_usec_
def method(self): return self.method_
def set_method(self, x):
self.has_method_ = 1
self.method_ = x
def clear_method(self):
if self.has_method_:
self.has_method_ = 0
self.method_ = 2
def has_method(self): return self.has_method_
def url(self): return self.url_
def set_url(self, x):
self.has_url_ = 1
self.url_ = x
def clear_url(self):
if self.has_url_:
self.has_url_ = 0
self.url_ = ""
def has_url(self): return self.has_url_
def header_size(self): return len(self.header_)
def header_list(self): return self.header_
def header(self, i):
return self.header_[i]
def mutable_header(self, i):
return self.header_[i]
def add_header(self):
x = TaskQueueAddRequest_Header()
self.header_.append(x)
return x
def clear_header(self):
self.header_ = []
def body(self): return self.body_
def set_body(self, x):
self.has_body_ = 1
self.body_ = x
def clear_body(self):
if self.has_body_:
self.has_body_ = 0
self.body_ = ""
def has_body(self): return self.has_body_
def transaction(self):
if self.transaction_ is None:
self.lazy_init_lock_.acquire()
try:
if self.transaction_ is None: self.transaction_ = Transaction()
finally:
self.lazy_init_lock_.release()
return self.transaction_
def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()
def clear_transaction(self):
# Warning: this method does not acquire the lock.
if self.has_transaction_:
      self.has_transaction_ = 0
if self.transaction_ is not None: self.transaction_.Clear()
def has_transaction(self): return self.has_transaction_
def datastore_transaction(self): return self.datastore_transaction_
def set_datastore_transaction(self, x):
self.has_datastore_transaction_ = 1
self.datastore_transaction_ = x
def clear_datastore_transaction(self):
if self.has_datastore_transaction_:
self.has_datastore_transaction_ = 0
self.datastore_transaction_ = ""
def has_datastore_transaction(self): return self.has_datastore_transaction_
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def crontimetable(self):
if self.crontimetable_ is None:
self.lazy_init_lock_.acquire()
try:
if self.crontimetable_ is None: self.crontimetable_ = TaskQueueAddRequest_CronTimetable()
finally:
self.lazy_init_lock_.release()
return self.crontimetable_
def mutable_crontimetable(self): self.has_crontimetable_ = 1; return self.crontimetable()
def clear_crontimetable(self):
# Warning: this method does not acquire the lock.
if self.has_crontimetable_:
      self.has_crontimetable_ = 0
if self.crontimetable_ is not None: self.crontimetable_.Clear()
def has_crontimetable(self): return self.has_crontimetable_
def description(self): return self.description_
def set_description(self, x):
self.has_description_ = 1
self.description_ = x
def clear_description(self):
if self.has_description_:
self.has_description_ = 0
self.description_ = ""
def has_description(self): return self.has_description_
def payload(self):
if self.payload_ is None:
self.lazy_init_lock_.acquire()
try:
if self.payload_ is None: self.payload_ = MessageSet()
finally:
self.lazy_init_lock_.release()
return self.payload_
def mutable_payload(self): self.has_payload_ = 1; return self.payload()
def clear_payload(self):
# Warning: this method does not acquire the lock.
if self.has_payload_:
      self.has_payload_ = 0
if self.payload_ is not None: self.payload_.Clear()
def has_payload(self): return self.has_payload_
def retry_parameters(self):
if self.retry_parameters_ is None:
self.lazy_init_lock_.acquire()
try:
if self.retry_parameters_ is None: self.retry_parameters_ = TaskQueueRetryParameters()
finally:
self.lazy_init_lock_.release()
return self.retry_parameters_
def mutable_retry_parameters(self): self.has_retry_parameters_ = 1; return self.retry_parameters()
def clear_retry_parameters(self):
# Warning: this method does not acquire the lock.
if self.has_retry_parameters_:
      self.has_retry_parameters_ = 0
if self.retry_parameters_ is not None: self.retry_parameters_.Clear()
def has_retry_parameters(self): return self.has_retry_parameters_
def mode(self): return self.mode_
def set_mode(self, x):
self.has_mode_ = 1
self.mode_ = x
def clear_mode(self):
if self.has_mode_:
self.has_mode_ = 0
self.mode_ = 0
def has_mode(self): return self.has_mode_
def tag(self): return self.tag_
def set_tag(self, x):
self.has_tag_ = 1
self.tag_ = x
def clear_tag(self):
if self.has_tag_:
self.has_tag_ = 0
self.tag_ = ""
def has_tag(self): return self.has_tag_
def cron_retry_parameters(self):
if self.cron_retry_parameters_ is None:
self.lazy_init_lock_.acquire()
try:
if self.cron_retry_parameters_ is None: self.cron_retry_parameters_ = TaskQueueRetryParameters()
finally:
self.lazy_init_lock_.release()
return self.cron_retry_parameters_
def mutable_cron_retry_parameters(self): self.has_cron_retry_parameters_ = 1; return self.cron_retry_parameters()
def clear_cron_retry_parameters(self):
# Warning: this method does not acquire the lock.
if self.has_cron_retry_parameters_:
      self.has_cron_retry_parameters_ = 0
if self.cron_retry_parameters_ is not None: self.cron_retry_parameters_.Clear()
def has_cron_retry_parameters(self): return self.has_cron_retry_parameters_
def MergeFrom(self, x):
assert x is not self
if (x.has_queue_name()): self.set_queue_name(x.queue_name())
if (x.has_task_name()): self.set_task_name(x.task_name())
if (x.has_eta_usec()): self.set_eta_usec(x.eta_usec())
if (x.has_method()): self.set_method(x.method())
if (x.has_url()): self.set_url(x.url())
for i in xrange(x.header_size()): self.add_header().CopyFrom(x.header(i))
if (x.has_body()): self.set_body(x.body())
if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
if (x.has_datastore_transaction()): self.set_datastore_transaction(x.datastore_transaction())
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_crontimetable()): self.mutable_crontimetable().MergeFrom(x.crontimetable())
if (x.has_description()): self.set_description(x.description())
if (x.has_payload()): self.mutable_payload().MergeFrom(x.payload())
if (x.has_retry_parameters()): self.mutable_retry_parameters().MergeFrom(x.retry_parameters())
if (x.has_mode()): self.set_mode(x.mode())
if (x.has_tag()): self.set_tag(x.tag())
if (x.has_cron_retry_parameters()): self.mutable_cron_retry_parameters().MergeFrom(x.cron_retry_parameters())
def Equals(self, x):
if x is self: return 1
if self.has_queue_name_ != x.has_queue_name_: return 0
if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
if self.has_task_name_ != x.has_task_name_: return 0
if self.has_task_name_ and self.task_name_ != x.task_name_: return 0
if self.has_eta_usec_ != x.has_eta_usec_: return 0
if self.has_eta_usec_ and self.eta_usec_ != x.eta_usec_: return 0
if self.has_method_ != x.has_method_: return 0
if self.has_method_ and self.method_ != x.method_: return 0
if self.has_url_ != x.has_url_: return 0
if self.has_url_ and self.url_ != x.url_: return 0
if len(self.header_) != len(x.header_): return 0
for e1, e2 in zip(self.header_, x.header_):
if e1 != e2: return 0
if self.has_body_ != x.has_body_: return 0
if self.has_body_ and self.body_ != x.body_: return 0
if self.has_transaction_ != x.has_transaction_: return 0
if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
if self.has_datastore_transaction_ != x.has_datastore_transaction_: return 0
if self.has_datastore_transaction_ and self.datastore_transaction_ != x.datastore_transaction_: return 0
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_crontimetable_ != x.has_crontimetable_: return 0
if self.has_crontimetable_ and self.crontimetable_ != x.crontimetable_: return 0
if self.has_description_ != x.has_description_: return 0
if self.has_description_ and self.description_ != x.description_: return 0
if self.has_payload_ != x.has_payload_: return 0
if self.has_payload_ and self.payload_ != x.payload_: return 0
if self.has_retry_parameters_ != x.has_retry_parameters_: return 0
if self.has_retry_parameters_ and self.retry_parameters_ != x.retry_parameters_: return 0
if self.has_mode_ != x.has_mode_: return 0
if self.has_mode_ and self.mode_ != x.mode_: return 0
if self.has_tag_ != x.has_tag_: return 0
if self.has_tag_ and self.tag_ != x.tag_: return 0
if self.has_cron_retry_parameters_ != x.has_cron_retry_parameters_: return 0
if self.has_cron_retry_parameters_ and self.cron_retry_parameters_ != x.cron_retry_parameters_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_queue_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: queue_name not set.')
if (not self.has_task_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: task_name not set.')
if (not self.has_eta_usec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: eta_usec not set.')
for p in self.header_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_transaction_ and not self.transaction_.IsInitialized(debug_strs)): initialized = 0
if (self.has_crontimetable_ and not self.crontimetable_.IsInitialized(debug_strs)): initialized = 0
if (self.has_payload_ and not self.payload_.IsInitialized(debug_strs)): initialized = 0
if (self.has_retry_parameters_ and not self.retry_parameters_.IsInitialized(debug_strs)): initialized = 0
if (self.has_cron_retry_parameters_ and not self.cron_retry_parameters_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.queue_name_))
n += self.lengthString(len(self.task_name_))
n += self.lengthVarInt64(self.eta_usec_)
if (self.has_method_): n += 1 + self.lengthVarInt64(self.method_)
if (self.has_url_): n += 1 + self.lengthString(len(self.url_))
n += 2 * len(self.header_)
for i in xrange(len(self.header_)): n += self.header_[i].ByteSize()
if (self.has_body_): n += 1 + self.lengthString(len(self.body_))
if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSize())
if (self.has_datastore_transaction_): n += 2 + self.lengthString(len(self.datastore_transaction_))
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
if (self.has_crontimetable_): n += 2 + self.crontimetable_.ByteSize()
if (self.has_description_): n += 1 + self.lengthString(len(self.description_))
if (self.has_payload_): n += 2 + self.lengthString(self.payload_.ByteSize())
if (self.has_retry_parameters_): n += 2 + self.lengthString(self.retry_parameters_.ByteSize())
if (self.has_mode_): n += 2 + self.lengthVarInt64(self.mode_)
if (self.has_tag_): n += 2 + self.lengthString(len(self.tag_))
if (self.has_cron_retry_parameters_): n += 2 + self.lengthString(self.cron_retry_parameters_.ByteSize())
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_queue_name_):
n += 1
n += self.lengthString(len(self.queue_name_))
if (self.has_task_name_):
n += 1
n += self.lengthString(len(self.task_name_))
if (self.has_eta_usec_):
n += 1
n += self.lengthVarInt64(self.eta_usec_)
if (self.has_method_): n += 1 + self.lengthVarInt64(self.method_)
if (self.has_url_): n += 1 + self.lengthString(len(self.url_))
n += 2 * len(self.header_)
for i in xrange(len(self.header_)): n += self.header_[i].ByteSizePartial()
if (self.has_body_): n += 1 + self.lengthString(len(self.body_))
if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSizePartial())
if (self.has_datastore_transaction_): n += 2 + self.lengthString(len(self.datastore_transaction_))
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
if (self.has_crontimetable_): n += 2 + self.crontimetable_.ByteSizePartial()
if (self.has_description_): n += 1 + self.lengthString(len(self.description_))
if (self.has_payload_): n += 2 + self.lengthString(self.payload_.ByteSizePartial())
if (self.has_retry_parameters_): n += 2 + self.lengthString(self.retry_parameters_.ByteSizePartial())
if (self.has_mode_): n += 2 + self.lengthVarInt64(self.mode_)
if (self.has_tag_): n += 2 + self.lengthString(len(self.tag_))
if (self.has_cron_retry_parameters_): n += 2 + self.lengthString(self.cron_retry_parameters_.ByteSizePartial())
return n
def Clear(self):
self.clear_queue_name()
self.clear_task_name()
self.clear_eta_usec()
self.clear_method()
self.clear_url()
self.clear_header()
self.clear_body()
self.clear_transaction()
self.clear_datastore_transaction()
self.clear_app_id()
self.clear_crontimetable()
self.clear_description()
self.clear_payload()
self.clear_retry_parameters()
self.clear_mode()
self.clear_tag()
self.clear_cron_retry_parameters()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.queue_name_)
out.putVarInt32(18)
out.putPrefixedString(self.task_name_)
out.putVarInt32(24)
out.putVarInt64(self.eta_usec_)
if (self.has_url_):
out.putVarInt32(34)
out.putPrefixedString(self.url_)
if (self.has_method_):
out.putVarInt32(40)
out.putVarInt32(self.method_)
for i in xrange(len(self.header_)):
out.putVarInt32(51)
self.header_[i].OutputUnchecked(out)
out.putVarInt32(52)
if (self.has_body_):
out.putVarInt32(74)
out.putPrefixedString(self.body_)
if (self.has_transaction_):
out.putVarInt32(82)
out.putVarInt32(self.transaction_.ByteSize())
self.transaction_.OutputUnchecked(out)
if (self.has_app_id_):
out.putVarInt32(90)
out.putPrefixedString(self.app_id_)
if (self.has_crontimetable_):
out.putVarInt32(99)
self.crontimetable_.OutputUnchecked(out)
out.putVarInt32(100)
if (self.has_description_):
out.putVarInt32(122)
out.putPrefixedString(self.description_)
if (self.has_payload_):
out.putVarInt32(130)
out.putVarInt32(self.payload_.ByteSize())
self.payload_.OutputUnchecked(out)
if (self.has_retry_parameters_):
out.putVarInt32(138)
out.putVarInt32(self.retry_parameters_.ByteSize())
self.retry_parameters_.OutputUnchecked(out)
if (self.has_mode_):
out.putVarInt32(144)
out.putVarInt32(self.mode_)
if (self.has_tag_):
out.putVarInt32(154)
out.putPrefixedString(self.tag_)
if (self.has_cron_retry_parameters_):
out.putVarInt32(162)
out.putVarInt32(self.cron_retry_parameters_.ByteSize())
self.cron_retry_parameters_.OutputUnchecked(out)
if (self.has_datastore_transaction_):
out.putVarInt32(170)
out.putPrefixedString(self.datastore_transaction_)
def OutputPartial(self, out):
if (self.has_queue_name_):
out.putVarInt32(10)
out.putPrefixedString(self.queue_name_)
if (self.has_task_name_):
out.putVarInt32(18)
out.putPrefixedString(self.task_name_)
if (self.has_eta_usec_):
out.putVarInt32(24)
out.putVarInt64(self.eta_usec_)
if (self.has_url_):
out.putVarInt32(34)
out.putPrefixedString(self.url_)
if (self.has_method_):
out.putVarInt32(40)
out.putVarInt32(self.method_)
for i in xrange(len(self.header_)):
out.putVarInt32(51)
self.header_[i].OutputPartial(out)
out.putVarInt32(52)
if (self.has_body_):
out.putVarInt32(74)
out.putPrefixedString(self.body_)
if (self.has_transaction_):
out.putVarInt32(82)
out.putVarInt32(self.transaction_.ByteSizePartial())
self.transaction_.OutputPartial(out)
if (self.has_app_id_):
out.putVarInt32(90)
out.putPrefixedString(self.app_id_)
if (self.has_crontimetable_):
out.putVarInt32(99)
self.crontimetable_.OutputPartial(out)
out.putVarInt32(100)
if (self.has_description_):
out.putVarInt32(122)
out.putPrefixedString(self.description_)
if (self.has_payload_):
out.putVarInt32(130)
out.putVarInt32(self.payload_.ByteSizePartial())
self.payload_.OutputPartial(out)
if (self.has_retry_parameters_):
out.putVarInt32(138)
out.putVarInt32(self.retry_parameters_.ByteSizePartial())
self.retry_parameters_.OutputPartial(out)
if (self.has_mode_):
out.putVarInt32(144)
out.putVarInt32(self.mode_)
if (self.has_tag_):
out.putVarInt32(154)
out.putPrefixedString(self.tag_)
if (self.has_cron_retry_parameters_):
out.putVarInt32(162)
out.putVarInt32(self.cron_retry_parameters_.ByteSizePartial())
self.cron_retry_parameters_.OutputPartial(out)
if (self.has_datastore_transaction_):
out.putVarInt32(170)
out.putPrefixedString(self.datastore_transaction_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_queue_name(d.getPrefixedString())
continue
if tt == 18:
self.set_task_name(d.getPrefixedString())
continue
if tt == 24:
self.set_eta_usec(d.getVarInt64())
continue
if tt == 34:
self.set_url(d.getPrefixedString())
continue
if tt == 40:
self.set_method(d.getVarInt32())
continue
if tt == 51:
self.add_header().TryMerge(d)
continue
if tt == 74:
self.set_body(d.getPrefixedString())
continue
if tt == 82:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_transaction().TryMerge(tmp)
continue
if tt == 90:
self.set_app_id(d.getPrefixedString())
continue
if tt == 99:
self.mutable_crontimetable().TryMerge(d)
continue
if tt == 122:
self.set_description(d.getPrefixedString())
continue
if tt == 130:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_payload().TryMerge(tmp)
continue
if tt == 138:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_retry_parameters().TryMerge(tmp)
continue
if tt == 144:
self.set_mode(d.getVarInt32())
continue
if tt == 154:
self.set_tag(d.getPrefixedString())
continue
if tt == 162:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_cron_retry_parameters().TryMerge(tmp)
continue
if tt == 170:
self.set_datastore_transaction(d.getPrefixedString())
continue
      # tag 0 is special: it's used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
if self.has_task_name_: res+=prefix+("task_name: %s\n" % self.DebugFormatString(self.task_name_))
if self.has_eta_usec_: res+=prefix+("eta_usec: %s\n" % self.DebugFormatInt64(self.eta_usec_))
if self.has_method_: res+=prefix+("method: %s\n" % self.DebugFormatInt32(self.method_))
if self.has_url_: res+=prefix+("url: %s\n" % self.DebugFormatString(self.url_))
cnt=0
for e in self.header_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Header%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
if self.has_body_: res+=prefix+("body: %s\n" % self.DebugFormatString(self.body_))
if self.has_transaction_:
res+=prefix+"transaction <\n"
res+=self.transaction_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_datastore_transaction_: res+=prefix+("datastore_transaction: %s\n" % self.DebugFormatString(self.datastore_transaction_))
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_crontimetable_:
res+=prefix+"CronTimetable {\n"
res+=self.crontimetable_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
if self.has_description_: res+=prefix+("description: %s\n" % self.DebugFormatString(self.description_))
if self.has_payload_:
res+=prefix+"payload <\n"
res+=self.payload_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_retry_parameters_:
res+=prefix+"retry_parameters <\n"
res+=self.retry_parameters_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_mode_: res+=prefix+("mode: %s\n" % self.DebugFormatInt32(self.mode_))
if self.has_tag_: res+=prefix+("tag: %s\n" % self.DebugFormatString(self.tag_))
if self.has_cron_retry_parameters_:
res+=prefix+"cron_retry_parameters <\n"
res+=self.cron_retry_parameters_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kqueue_name = 1
ktask_name = 2
keta_usec = 3
kmethod = 5
kurl = 4
kHeaderGroup = 6
kHeaderkey = 7
kHeadervalue = 8
kbody = 9
ktransaction = 10
kdatastore_transaction = 21
kapp_id = 11
kCronTimetableGroup = 12
kCronTimetableschedule = 13
kCronTimetabletimezone = 14
kdescription = 15
kpayload = 16
kretry_parameters = 17
kmode = 18
ktag = 19
kcron_retry_parameters = 20
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "queue_name",
2: "task_name",
3: "eta_usec",
4: "url",
5: "method",
6: "Header",
7: "key",
8: "value",
9: "body",
10: "transaction",
11: "app_id",
12: "CronTimetable",
13: "schedule",
14: "timezone",
15: "description",
16: "payload",
17: "retry_parameters",
18: "mode",
19: "tag",
20: "cron_retry_parameters",
21: "datastore_transaction",
}, 21)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.STARTGROUP,
7: ProtocolBuffer.Encoder.STRING,
8: ProtocolBuffer.Encoder.STRING,
9: ProtocolBuffer.Encoder.STRING,
10: ProtocolBuffer.Encoder.STRING,
11: ProtocolBuffer.Encoder.STRING,
12: ProtocolBuffer.Encoder.STARTGROUP,
13: ProtocolBuffer.Encoder.STRING,
14: ProtocolBuffer.Encoder.STRING,
15: ProtocolBuffer.Encoder.STRING,
16: ProtocolBuffer.Encoder.STRING,
17: ProtocolBuffer.Encoder.STRING,
18: ProtocolBuffer.Encoder.NUMERIC,
19: ProtocolBuffer.Encoder.STRING,
20: ProtocolBuffer.Encoder.STRING,
21: ProtocolBuffer.Encoder.STRING,
}, 21, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueAddRequest'
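
# Minimal sketch of assembling an add request with the accessors defined
# above; the queue name, task name and timestamp are hypothetical, and
# the microsecond unit of eta_usec is inferred from the field name
# rather than documented here.
#
#   req = TaskQueueAddRequest()
#   req.set_queue_name('default')
#   req.set_task_name('task-1')
#   req.set_eta_usec(1300000000 * 1000000)
#   req.set_method(TaskQueueAddRequest.POST)
#   req.set_url('/worker')
#   hdr = req.add_header()
#   hdr.set_key('Content-Type')
#   hdr.set_value('application/x-www-form-urlencoded')
#   assert req.IsInitialized()  # the three required fields are set
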
class TaskQueueAddResponse(ProtocolBuffer.ProtocolMessage):
has_chosen_task_name_ = 0
chosen_task_name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def chosen_task_name(self): return self.chosen_task_name_
def set_chosen_task_name(self, x):
self.has_chosen_task_name_ = 1
self.chosen_task_name_ = x
def clear_chosen_task_name(self):
if self.has_chosen_task_name_:
self.has_chosen_task_name_ = 0
self.chosen_task_name_ = ""
def has_chosen_task_name(self): return self.has_chosen_task_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_chosen_task_name()): self.set_chosen_task_name(x.chosen_task_name())
def Equals(self, x):
if x is self: return 1
if self.has_chosen_task_name_ != x.has_chosen_task_name_: return 0
if self.has_chosen_task_name_ and self.chosen_task_name_ != x.chosen_task_name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_chosen_task_name_): n += 1 + self.lengthString(len(self.chosen_task_name_))
return n
def ByteSizePartial(self):
n = 0
if (self.has_chosen_task_name_): n += 1 + self.lengthString(len(self.chosen_task_name_))
return n
def Clear(self):
self.clear_chosen_task_name()
def OutputUnchecked(self, out):
if (self.has_chosen_task_name_):
out.putVarInt32(10)
out.putPrefixedString(self.chosen_task_name_)
def OutputPartial(self, out):
if (self.has_chosen_task_name_):
out.putVarInt32(10)
out.putPrefixedString(self.chosen_task_name_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_chosen_task_name(d.getPrefixedString())
continue
      # tag 0 is special: it's used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_chosen_task_name_: res+=prefix+("chosen_task_name: %s\n" % self.DebugFormatString(self.chosen_task_name_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kchosen_task_name = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "chosen_task_name",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueAddResponse'
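
# chosen_task_name is optional; presumably the backend fills it in when
# the caller let the service pick the task name (an inference from the
# field name, not stated in the generated code):
#
#   resp = TaskQueueAddResponse()
#   if resp.has_chosen_task_name():
#     name = resp.chosen_task_name()
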
class TaskQueueBulkAddRequest(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.add_request_ = []
if contents is not None: self.MergeFromString(contents)
def add_request_size(self): return len(self.add_request_)
def add_request_list(self): return self.add_request_
def add_request(self, i):
return self.add_request_[i]
def mutable_add_request(self, i):
return self.add_request_[i]
def add_add_request(self):
x = TaskQueueAddRequest()
self.add_request_.append(x)
return x
def clear_add_request(self):
self.add_request_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.add_request_size()): self.add_add_request().CopyFrom(x.add_request(i))
def Equals(self, x):
if x is self: return 1
if len(self.add_request_) != len(x.add_request_): return 0
for e1, e2 in zip(self.add_request_, x.add_request_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.add_request_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.add_request_)
for i in xrange(len(self.add_request_)): n += self.lengthString(self.add_request_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.add_request_)
for i in xrange(len(self.add_request_)): n += self.lengthString(self.add_request_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_add_request()
def OutputUnchecked(self, out):
for i in xrange(len(self.add_request_)):
out.putVarInt32(10)
out.putVarInt32(self.add_request_[i].ByteSize())
self.add_request_[i].OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.add_request_)):
out.putVarInt32(10)
out.putVarInt32(self.add_request_[i].ByteSizePartial())
self.add_request_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_add_request().TryMerge(tmp)
continue
      # tag 0 is special: it's used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.add_request_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("add_request%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kadd_request = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "add_request",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueBulkAddRequest'
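
# Sketch of batching several adds: add_add_request() appends and returns
# a fresh TaskQueueAddRequest that can be populated in place (or via
# CopyFrom, inherited from ProtocolBuffer.ProtocolMessage as used in
# MergeFrom above). Task names here are hypothetical.
#
#   bulk = TaskQueueBulkAddRequest()
#   for name in ('task-1', 'task-2'):
#     r = bulk.add_add_request()
#     r.set_queue_name('default')
#     r.set_task_name(name)
#     r.set_eta_usec(0)
#   assert bulk.add_request_size() == 2
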
class TaskQueueBulkAddResponse_TaskResult(ProtocolBuffer.ProtocolMessage):
has_result_ = 0
result_ = 0
has_chosen_task_name_ = 0
chosen_task_name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def result(self): return self.result_
def set_result(self, x):
self.has_result_ = 1
self.result_ = x
def clear_result(self):
if self.has_result_:
self.has_result_ = 0
self.result_ = 0
def has_result(self): return self.has_result_
def chosen_task_name(self): return self.chosen_task_name_
def set_chosen_task_name(self, x):
self.has_chosen_task_name_ = 1
self.chosen_task_name_ = x
def clear_chosen_task_name(self):
if self.has_chosen_task_name_:
self.has_chosen_task_name_ = 0
self.chosen_task_name_ = ""
def has_chosen_task_name(self): return self.has_chosen_task_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_result()): self.set_result(x.result())
if (x.has_chosen_task_name()): self.set_chosen_task_name(x.chosen_task_name())
def Equals(self, x):
if x is self: return 1
if self.has_result_ != x.has_result_: return 0
if self.has_result_ and self.result_ != x.result_: return 0
if self.has_chosen_task_name_ != x.has_chosen_task_name_: return 0
if self.has_chosen_task_name_ and self.chosen_task_name_ != x.chosen_task_name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_result_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: result not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.result_)
if (self.has_chosen_task_name_): n += 1 + self.lengthString(len(self.chosen_task_name_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_result_):
n += 1
n += self.lengthVarInt64(self.result_)
if (self.has_chosen_task_name_): n += 1 + self.lengthString(len(self.chosen_task_name_))
return n
def Clear(self):
self.clear_result()
self.clear_chosen_task_name()
def OutputUnchecked(self, out):
out.putVarInt32(16)
out.putVarInt32(self.result_)
if (self.has_chosen_task_name_):
out.putVarInt32(26)
out.putPrefixedString(self.chosen_task_name_)
def OutputPartial(self, out):
if (self.has_result_):
out.putVarInt32(16)
out.putVarInt32(self.result_)
if (self.has_chosen_task_name_):
out.putVarInt32(26)
out.putPrefixedString(self.chosen_task_name_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 12: break
if tt == 16:
self.set_result(d.getVarInt32())
continue
if tt == 26:
self.set_chosen_task_name(d.getPrefixedString())
continue
      # tag 0 is special: it's used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_result_: res+=prefix+("result: %s\n" % self.DebugFormatInt32(self.result_))
if self.has_chosen_task_name_: res+=prefix+("chosen_task_name: %s\n" % self.DebugFormatString(self.chosen_task_name_))
return res
class TaskQueueBulkAddResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.taskresult_ = []
if contents is not None: self.MergeFromString(contents)
def taskresult_size(self): return len(self.taskresult_)
def taskresult_list(self): return self.taskresult_
def taskresult(self, i):
return self.taskresult_[i]
def mutable_taskresult(self, i):
return self.taskresult_[i]
def add_taskresult(self):
x = TaskQueueBulkAddResponse_TaskResult()
self.taskresult_.append(x)
return x
def clear_taskresult(self):
self.taskresult_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.taskresult_size()): self.add_taskresult().CopyFrom(x.taskresult(i))
def Equals(self, x):
if x is self: return 1
if len(self.taskresult_) != len(x.taskresult_): return 0
for e1, e2 in zip(self.taskresult_, x.taskresult_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.taskresult_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 2 * len(self.taskresult_)
for i in xrange(len(self.taskresult_)): n += self.taskresult_[i].ByteSize()
return n
def ByteSizePartial(self):
n = 0
n += 2 * len(self.taskresult_)
for i in xrange(len(self.taskresult_)): n += self.taskresult_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_taskresult()
def OutputUnchecked(self, out):
for i in xrange(len(self.taskresult_)):
out.putVarInt32(11)
self.taskresult_[i].OutputUnchecked(out)
out.putVarInt32(12)
def OutputPartial(self, out):
for i in xrange(len(self.taskresult_)):
out.putVarInt32(11)
self.taskresult_[i].OutputPartial(out)
out.putVarInt32(12)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 11:
self.add_taskresult().TryMerge(d)
continue
      # tag 0 is special: it's used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.taskresult_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("TaskResult%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kTaskResultGroup = 1
kTaskResultresult = 2
kTaskResultchosen_task_name = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "TaskResult",
2: "result",
3: "chosen_task_name",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STARTGROUP,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueBulkAddResponse'
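
# TaskResult is again a group: field 1, bracketed by start tag 11
# ((1 << 3) | 3) and end tag 12, which is what ByteSize() above charges
# 2 bytes per element for. Presumably each TaskResult corresponds
# positionally to an add_request in the bulk request, though the
# generated code does not say so.
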
class TaskQueueDeleteRequest(ProtocolBuffer.ProtocolMessage):
has_queue_name_ = 0
queue_name_ = ""
has_app_id_ = 0
app_id_ = ""
def __init__(self, contents=None):
self.task_name_ = []
if contents is not None: self.MergeFromString(contents)
def queue_name(self): return self.queue_name_
def set_queue_name(self, x):
self.has_queue_name_ = 1
self.queue_name_ = x
def clear_queue_name(self):
if self.has_queue_name_:
self.has_queue_name_ = 0
self.queue_name_ = ""
def has_queue_name(self): return self.has_queue_name_
def task_name_size(self): return len(self.task_name_)
def task_name_list(self): return self.task_name_
def task_name(self, i):
return self.task_name_[i]
def set_task_name(self, i, x):
self.task_name_[i] = x
def add_task_name(self, x):
self.task_name_.append(x)
def clear_task_name(self):
self.task_name_ = []
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_queue_name()): self.set_queue_name(x.queue_name())
for i in xrange(x.task_name_size()): self.add_task_name(x.task_name(i))
if (x.has_app_id()): self.set_app_id(x.app_id())
def Equals(self, x):
if x is self: return 1
if self.has_queue_name_ != x.has_queue_name_: return 0
if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
if len(self.task_name_) != len(x.task_name_): return 0
for e1, e2 in zip(self.task_name_, x.task_name_):
if e1 != e2: return 0
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_queue_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: queue_name not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.queue_name_))
n += 1 * len(self.task_name_)
for i in xrange(len(self.task_name_)): n += self.lengthString(len(self.task_name_[i]))
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_queue_name_):
n += 1
n += self.lengthString(len(self.queue_name_))
n += 1 * len(self.task_name_)
for i in xrange(len(self.task_name_)): n += self.lengthString(len(self.task_name_[i]))
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n
def Clear(self):
self.clear_queue_name()
self.clear_task_name()
self.clear_app_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.queue_name_)
for i in xrange(len(self.task_name_)):
out.putVarInt32(18)
out.putPrefixedString(self.task_name_[i])
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def OutputPartial(self, out):
if (self.has_queue_name_):
out.putVarInt32(10)
out.putPrefixedString(self.queue_name_)
for i in xrange(len(self.task_name_)):
out.putVarInt32(18)
out.putPrefixedString(self.task_name_[i])
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_queue_name(d.getPrefixedString())
continue
if tt == 18:
self.add_task_name(d.getPrefixedString())
continue
if tt == 26:
self.set_app_id(d.getPrefixedString())
continue
      # tag 0 is special: it's used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
cnt=0
for e in self.task_name_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("task_name%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kqueue_name = 1
ktask_name = 2
kapp_id = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "queue_name",
2: "task_name",
3: "app_id",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueDeleteRequest'
class TaskQueueDeleteResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.result_ = []
if contents is not None: self.MergeFromString(contents)
def result_size(self): return len(self.result_)
def result_list(self): return self.result_
def result(self, i):
return self.result_[i]
def set_result(self, i, x):
self.result_[i] = x
def add_result(self, x):
self.result_.append(x)
def clear_result(self):
self.result_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.result_size()): self.add_result(x.result(i))
def Equals(self, x):
if x is self: return 1
if len(self.result_) != len(x.result_): return 0
for e1, e2 in zip(self.result_, x.result_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.result_)
for i in xrange(len(self.result_)): n += self.lengthVarInt64(self.result_[i])
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.result_)
for i in xrange(len(self.result_)): n += self.lengthVarInt64(self.result_[i])
return n
def Clear(self):
self.clear_result()
def OutputUnchecked(self, out):
for i in xrange(len(self.result_)):
out.putVarInt32(24)
out.putVarInt32(self.result_[i])
def OutputPartial(self, out):
for i in xrange(len(self.result_)):
out.putVarInt32(24)
out.putVarInt32(self.result_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 24:
self.add_result(d.getVarInt32())
continue
      # Tag 0 is special: it indicates an error, so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.result_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("result%s: %s\n" % (elm, self.DebugFormatInt32(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kresult = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
3: "result",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueDeleteResponse'
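# Request to force a single named task to run immediately. queue_name and
# task_name are required; app_id is optional.
#
# Minimal usage sketch (assuming the ProtocolMessage base class exposes
# Encode() for serialization; all values below are illustrative):
#
#   req = TaskQueueForceRunRequest()
#   req.set_queue_name('default')
#   req.set_task_name('task-42')
#   payload = req.Encode()  # wire-format bytes for the ForceRun call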
class TaskQueueForceRunRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_queue_name_ = 0
queue_name_ = ""
has_task_name_ = 0
task_name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def queue_name(self): return self.queue_name_
def set_queue_name(self, x):
self.has_queue_name_ = 1
self.queue_name_ = x
def clear_queue_name(self):
if self.has_queue_name_:
self.has_queue_name_ = 0
self.queue_name_ = ""
def has_queue_name(self): return self.has_queue_name_
def task_name(self): return self.task_name_
def set_task_name(self, x):
self.has_task_name_ = 1
self.task_name_ = x
def clear_task_name(self):
if self.has_task_name_:
self.has_task_name_ = 0
self.task_name_ = ""
def has_task_name(self): return self.has_task_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_queue_name()): self.set_queue_name(x.queue_name())
if (x.has_task_name()): self.set_task_name(x.task_name())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_queue_name_ != x.has_queue_name_: return 0
if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
if self.has_task_name_ != x.has_task_name_: return 0
if self.has_task_name_ and self.task_name_ != x.task_name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_queue_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: queue_name not set.')
if (not self.has_task_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: task_name not set.')
return initialized
def ByteSize(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
n += self.lengthString(len(self.queue_name_))
n += self.lengthString(len(self.task_name_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
if (self.has_queue_name_):
n += 1
n += self.lengthString(len(self.queue_name_))
if (self.has_task_name_):
n += 1
n += self.lengthString(len(self.task_name_))
return n
def Clear(self):
self.clear_app_id()
self.clear_queue_name()
self.clear_task_name()
def OutputUnchecked(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
out.putVarInt32(26)
out.putPrefixedString(self.task_name_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
if (self.has_queue_name_):
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
if (self.has_task_name_):
out.putVarInt32(26)
out.putPrefixedString(self.task_name_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 18:
self.set_queue_name(d.getPrefixedString())
continue
if tt == 26:
self.set_task_name(d.getPrefixedString())
continue
      # Tag 0 is special: it indicates an error, so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
if self.has_task_name_: res+=prefix+("task_name: %s\n" % self.DebugFormatString(self.task_name_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kqueue_name = 2
ktask_name = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "queue_name",
3: "task_name",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueForceRunRequest'
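# Response to TaskQueueForceRunRequest: a single required varint result code.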
class TaskQueueForceRunResponse(ProtocolBuffer.ProtocolMessage):
has_result_ = 0
result_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def result(self): return self.result_
def set_result(self, x):
self.has_result_ = 1
self.result_ = x
def clear_result(self):
if self.has_result_:
self.has_result_ = 0
self.result_ = 0
def has_result(self): return self.has_result_
def MergeFrom(self, x):
assert x is not self
if (x.has_result()): self.set_result(x.result())
def Equals(self, x):
if x is self: return 1
if self.has_result_ != x.has_result_: return 0
if self.has_result_ and self.result_ != x.result_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_result_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: result not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.result_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_result_):
n += 1
n += self.lengthVarInt64(self.result_)
return n
def Clear(self):
self.clear_result()
def OutputUnchecked(self, out):
out.putVarInt32(24)
out.putVarInt32(self.result_)
def OutputPartial(self, out):
if (self.has_result_):
out.putVarInt32(24)
out.putVarInt32(self.result_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 24:
self.set_result(d.getVarInt32())
continue
      # Tag 0 is special: it indicates an error, so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_result_: res+=prefix+("result: %s\n" % self.DebugFormatInt32(self.result_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kresult = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
3: "result",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueForceRunResponse'
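# Creates or updates a queue definition. queue_name, bucket_refill_per_second
# and bucket_capacity (the token-bucket throttle) are required; the optional
# fields carry the user-visible rate string, retry parameters, a concurrency
# cap, the queue mode, an ACL, and HTTP headers to override on dispatched
# tasks.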
class TaskQueueUpdateQueueRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_queue_name_ = 0
queue_name_ = ""
has_bucket_refill_per_second_ = 0
bucket_refill_per_second_ = 0.0
has_bucket_capacity_ = 0
bucket_capacity_ = 0
has_user_specified_rate_ = 0
user_specified_rate_ = ""
has_retry_parameters_ = 0
retry_parameters_ = None
has_max_concurrent_requests_ = 0
max_concurrent_requests_ = 0
has_mode_ = 0
mode_ = 0
has_acl_ = 0
acl_ = None
def __init__(self, contents=None):
self.header_override_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def queue_name(self): return self.queue_name_
def set_queue_name(self, x):
self.has_queue_name_ = 1
self.queue_name_ = x
def clear_queue_name(self):
if self.has_queue_name_:
self.has_queue_name_ = 0
self.queue_name_ = ""
def has_queue_name(self): return self.has_queue_name_
def bucket_refill_per_second(self): return self.bucket_refill_per_second_
def set_bucket_refill_per_second(self, x):
self.has_bucket_refill_per_second_ = 1
self.bucket_refill_per_second_ = x
def clear_bucket_refill_per_second(self):
if self.has_bucket_refill_per_second_:
self.has_bucket_refill_per_second_ = 0
self.bucket_refill_per_second_ = 0.0
def has_bucket_refill_per_second(self): return self.has_bucket_refill_per_second_
def bucket_capacity(self): return self.bucket_capacity_
def set_bucket_capacity(self, x):
self.has_bucket_capacity_ = 1
self.bucket_capacity_ = x
def clear_bucket_capacity(self):
if self.has_bucket_capacity_:
self.has_bucket_capacity_ = 0
self.bucket_capacity_ = 0
def has_bucket_capacity(self): return self.has_bucket_capacity_
def user_specified_rate(self): return self.user_specified_rate_
def set_user_specified_rate(self, x):
self.has_user_specified_rate_ = 1
self.user_specified_rate_ = x
def clear_user_specified_rate(self):
if self.has_user_specified_rate_:
self.has_user_specified_rate_ = 0
self.user_specified_rate_ = ""
def has_user_specified_rate(self): return self.has_user_specified_rate_
def retry_parameters(self):
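    # Lazily create the submessage, double-checking under lazy_init_lock_ so
    # that concurrent callers share a single TaskQueueRetryParameters
    # instance.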
if self.retry_parameters_ is None:
self.lazy_init_lock_.acquire()
try:
if self.retry_parameters_ is None: self.retry_parameters_ = TaskQueueRetryParameters()
finally:
self.lazy_init_lock_.release()
return self.retry_parameters_
def mutable_retry_parameters(self): self.has_retry_parameters_ = 1; return self.retry_parameters()
def clear_retry_parameters(self):
# Warning: this method does not acquire the lock.
if self.has_retry_parameters_:
      self.has_retry_parameters_ = 0
if self.retry_parameters_ is not None: self.retry_parameters_.Clear()
def has_retry_parameters(self): return self.has_retry_parameters_
def max_concurrent_requests(self): return self.max_concurrent_requests_
def set_max_concurrent_requests(self, x):
self.has_max_concurrent_requests_ = 1
self.max_concurrent_requests_ = x
def clear_max_concurrent_requests(self):
if self.has_max_concurrent_requests_:
self.has_max_concurrent_requests_ = 0
self.max_concurrent_requests_ = 0
def has_max_concurrent_requests(self): return self.has_max_concurrent_requests_
def mode(self): return self.mode_
def set_mode(self, x):
self.has_mode_ = 1
self.mode_ = x
def clear_mode(self):
if self.has_mode_:
self.has_mode_ = 0
self.mode_ = 0
def has_mode(self): return self.has_mode_
def acl(self):
if self.acl_ is None:
self.lazy_init_lock_.acquire()
try:
if self.acl_ is None: self.acl_ = TaskQueueAcl()
finally:
self.lazy_init_lock_.release()
return self.acl_
def mutable_acl(self): self.has_acl_ = 1; return self.acl()
def clear_acl(self):
# Warning: this method does not acquire the lock.
if self.has_acl_:
      self.has_acl_ = 0
if self.acl_ is not None: self.acl_.Clear()
def has_acl(self): return self.has_acl_
def header_override_size(self): return len(self.header_override_)
def header_override_list(self): return self.header_override_
def header_override(self, i):
return self.header_override_[i]
def mutable_header_override(self, i):
return self.header_override_[i]
def add_header_override(self):
x = TaskQueueHttpHeader()
self.header_override_.append(x)
return x
def clear_header_override(self):
self.header_override_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_queue_name()): self.set_queue_name(x.queue_name())
if (x.has_bucket_refill_per_second()): self.set_bucket_refill_per_second(x.bucket_refill_per_second())
if (x.has_bucket_capacity()): self.set_bucket_capacity(x.bucket_capacity())
if (x.has_user_specified_rate()): self.set_user_specified_rate(x.user_specified_rate())
if (x.has_retry_parameters()): self.mutable_retry_parameters().MergeFrom(x.retry_parameters())
if (x.has_max_concurrent_requests()): self.set_max_concurrent_requests(x.max_concurrent_requests())
if (x.has_mode()): self.set_mode(x.mode())
if (x.has_acl()): self.mutable_acl().MergeFrom(x.acl())
for i in xrange(x.header_override_size()): self.add_header_override().CopyFrom(x.header_override(i))
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_queue_name_ != x.has_queue_name_: return 0
if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
if self.has_bucket_refill_per_second_ != x.has_bucket_refill_per_second_: return 0
if self.has_bucket_refill_per_second_ and self.bucket_refill_per_second_ != x.bucket_refill_per_second_: return 0
if self.has_bucket_capacity_ != x.has_bucket_capacity_: return 0
if self.has_bucket_capacity_ and self.bucket_capacity_ != x.bucket_capacity_: return 0
if self.has_user_specified_rate_ != x.has_user_specified_rate_: return 0
if self.has_user_specified_rate_ and self.user_specified_rate_ != x.user_specified_rate_: return 0
if self.has_retry_parameters_ != x.has_retry_parameters_: return 0
if self.has_retry_parameters_ and self.retry_parameters_ != x.retry_parameters_: return 0
if self.has_max_concurrent_requests_ != x.has_max_concurrent_requests_: return 0
if self.has_max_concurrent_requests_ and self.max_concurrent_requests_ != x.max_concurrent_requests_: return 0
if self.has_mode_ != x.has_mode_: return 0
if self.has_mode_ and self.mode_ != x.mode_: return 0
if self.has_acl_ != x.has_acl_: return 0
if self.has_acl_ and self.acl_ != x.acl_: return 0
if len(self.header_override_) != len(x.header_override_): return 0
for e1, e2 in zip(self.header_override_, x.header_override_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_queue_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: queue_name not set.')
if (not self.has_bucket_refill_per_second_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: bucket_refill_per_second not set.')
if (not self.has_bucket_capacity_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: bucket_capacity not set.')
if (self.has_retry_parameters_ and not self.retry_parameters_.IsInitialized(debug_strs)): initialized = 0
if (self.has_acl_ and not self.acl_.IsInitialized(debug_strs)): initialized = 0
for p in self.header_override_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
n += self.lengthString(len(self.queue_name_))
n += self.lengthVarInt64(self.bucket_capacity_)
if (self.has_user_specified_rate_): n += 1 + self.lengthString(len(self.user_specified_rate_))
if (self.has_retry_parameters_): n += 1 + self.lengthString(self.retry_parameters_.ByteSize())
if (self.has_max_concurrent_requests_): n += 1 + self.lengthVarInt64(self.max_concurrent_requests_)
if (self.has_mode_): n += 1 + self.lengthVarInt64(self.mode_)
if (self.has_acl_): n += 1 + self.lengthString(self.acl_.ByteSize())
n += 1 * len(self.header_override_)
for i in xrange(len(self.header_override_)): n += self.lengthString(self.header_override_[i].ByteSize())
return n + 11
def ByteSizePartial(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
if (self.has_queue_name_):
n += 1
n += self.lengthString(len(self.queue_name_))
if (self.has_bucket_refill_per_second_):
n += 9
if (self.has_bucket_capacity_):
n += 1
n += self.lengthVarInt64(self.bucket_capacity_)
if (self.has_user_specified_rate_): n += 1 + self.lengthString(len(self.user_specified_rate_))
if (self.has_retry_parameters_): n += 1 + self.lengthString(self.retry_parameters_.ByteSizePartial())
if (self.has_max_concurrent_requests_): n += 1 + self.lengthVarInt64(self.max_concurrent_requests_)
if (self.has_mode_): n += 1 + self.lengthVarInt64(self.mode_)
if (self.has_acl_): n += 1 + self.lengthString(self.acl_.ByteSizePartial())
n += 1 * len(self.header_override_)
for i in xrange(len(self.header_override_)): n += self.lengthString(self.header_override_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_app_id()
self.clear_queue_name()
self.clear_bucket_refill_per_second()
self.clear_bucket_capacity()
self.clear_user_specified_rate()
self.clear_retry_parameters()
self.clear_max_concurrent_requests()
self.clear_mode()
self.clear_acl()
self.clear_header_override()
def OutputUnchecked(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
out.putVarInt32(25)
out.putDouble(self.bucket_refill_per_second_)
out.putVarInt32(32)
out.putVarInt32(self.bucket_capacity_)
if (self.has_user_specified_rate_):
out.putVarInt32(42)
out.putPrefixedString(self.user_specified_rate_)
if (self.has_retry_parameters_):
out.putVarInt32(50)
out.putVarInt32(self.retry_parameters_.ByteSize())
self.retry_parameters_.OutputUnchecked(out)
if (self.has_max_concurrent_requests_):
out.putVarInt32(56)
out.putVarInt32(self.max_concurrent_requests_)
if (self.has_mode_):
out.putVarInt32(64)
out.putVarInt32(self.mode_)
if (self.has_acl_):
out.putVarInt32(74)
out.putVarInt32(self.acl_.ByteSize())
self.acl_.OutputUnchecked(out)
for i in xrange(len(self.header_override_)):
out.putVarInt32(82)
out.putVarInt32(self.header_override_[i].ByteSize())
self.header_override_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
if (self.has_queue_name_):
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
if (self.has_bucket_refill_per_second_):
out.putVarInt32(25)
out.putDouble(self.bucket_refill_per_second_)
if (self.has_bucket_capacity_):
out.putVarInt32(32)
out.putVarInt32(self.bucket_capacity_)
if (self.has_user_specified_rate_):
out.putVarInt32(42)
out.putPrefixedString(self.user_specified_rate_)
if (self.has_retry_parameters_):
out.putVarInt32(50)
out.putVarInt32(self.retry_parameters_.ByteSizePartial())
self.retry_parameters_.OutputPartial(out)
if (self.has_max_concurrent_requests_):
out.putVarInt32(56)
out.putVarInt32(self.max_concurrent_requests_)
if (self.has_mode_):
out.putVarInt32(64)
out.putVarInt32(self.mode_)
if (self.has_acl_):
out.putVarInt32(74)
out.putVarInt32(self.acl_.ByteSizePartial())
self.acl_.OutputPartial(out)
for i in xrange(len(self.header_override_)):
out.putVarInt32(82)
out.putVarInt32(self.header_override_[i].ByteSizePartial())
self.header_override_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 18:
self.set_queue_name(d.getPrefixedString())
continue
if tt == 25:
self.set_bucket_refill_per_second(d.getDouble())
continue
if tt == 32:
self.set_bucket_capacity(d.getVarInt32())
continue
if tt == 42:
self.set_user_specified_rate(d.getPrefixedString())
continue
if tt == 50:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_retry_parameters().TryMerge(tmp)
continue
if tt == 56:
self.set_max_concurrent_requests(d.getVarInt32())
continue
if tt == 64:
self.set_mode(d.getVarInt32())
continue
if tt == 74:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_acl().TryMerge(tmp)
continue
if tt == 82:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_header_override().TryMerge(tmp)
continue
      # Tag 0 is special: it indicates an error, so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
if self.has_bucket_refill_per_second_: res+=prefix+("bucket_refill_per_second: %s\n" % self.DebugFormat(self.bucket_refill_per_second_))
if self.has_bucket_capacity_: res+=prefix+("bucket_capacity: %s\n" % self.DebugFormatInt32(self.bucket_capacity_))
if self.has_user_specified_rate_: res+=prefix+("user_specified_rate: %s\n" % self.DebugFormatString(self.user_specified_rate_))
if self.has_retry_parameters_:
res+=prefix+"retry_parameters <\n"
res+=self.retry_parameters_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_max_concurrent_requests_: res+=prefix+("max_concurrent_requests: %s\n" % self.DebugFormatInt32(self.max_concurrent_requests_))
if self.has_mode_: res+=prefix+("mode: %s\n" % self.DebugFormatInt32(self.mode_))
if self.has_acl_:
res+=prefix+"acl <\n"
res+=self.acl_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.header_override_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("header_override%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kqueue_name = 2
kbucket_refill_per_second = 3
kbucket_capacity = 4
kuser_specified_rate = 5
kretry_parameters = 6
kmax_concurrent_requests = 7
kmode = 8
kacl = 9
kheader_override = 10
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "queue_name",
3: "bucket_refill_per_second",
4: "bucket_capacity",
5: "user_specified_rate",
6: "retry_parameters",
7: "max_concurrent_requests",
8: "mode",
9: "acl",
10: "header_override",
}, 10)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.DOUBLE,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.NUMERIC,
9: ProtocolBuffer.Encoder.STRING,
10: ProtocolBuffer.Encoder.STRING,
}, 10, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueUpdateQueueRequest'
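# Empty acknowledgement for TaskQueueUpdateQueueRequest; the message carries
# no fields.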
class TaskQueueUpdateQueueResponse(ProtocolBuffer.ProtocolMessage):
  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
      # Tag 0 is special: it indicates an error, so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueUpdateQueueResponse'
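# Lists queue definitions. max_rows is required and bounds how many queues
# are returned; app_id is optional.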
class TaskQueueFetchQueuesRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_max_rows_ = 0
max_rows_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def max_rows(self): return self.max_rows_
def set_max_rows(self, x):
self.has_max_rows_ = 1
self.max_rows_ = x
def clear_max_rows(self):
if self.has_max_rows_:
self.has_max_rows_ = 0
self.max_rows_ = 0
def has_max_rows(self): return self.has_max_rows_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_max_rows()): self.set_max_rows(x.max_rows())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_max_rows_ != x.has_max_rows_: return 0
if self.has_max_rows_ and self.max_rows_ != x.max_rows_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_max_rows_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: max_rows not set.')
return initialized
def ByteSize(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
n += self.lengthVarInt64(self.max_rows_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
if (self.has_max_rows_):
n += 1
n += self.lengthVarInt64(self.max_rows_)
return n
def Clear(self):
self.clear_app_id()
self.clear_max_rows()
def OutputUnchecked(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
out.putVarInt32(16)
out.putVarInt32(self.max_rows_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
if (self.has_max_rows_):
out.putVarInt32(16)
out.putVarInt32(self.max_rows_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 16:
self.set_max_rows(d.getVarInt32())
continue
      # Tag 0 is special: it indicates an error, so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_max_rows_: res+=prefix+("max_rows: %s\n" % self.DebugFormatInt32(self.max_rows_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kmax_rows = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "max_rows",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueFetchQueuesRequest'
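# One queue record in TaskQueueFetchQueuesResponse, encoded as a protobuf
# group (field 1 of the response). It echoes the configuration fields of
# TaskQueueUpdateQueueRequest and adds the required paused flag plus a
# creator_name defaulting to "apphosting".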
class TaskQueueFetchQueuesResponse_Queue(ProtocolBuffer.ProtocolMessage):
has_queue_name_ = 0
queue_name_ = ""
has_bucket_refill_per_second_ = 0
bucket_refill_per_second_ = 0.0
has_bucket_capacity_ = 0
bucket_capacity_ = 0.0
has_user_specified_rate_ = 0
user_specified_rate_ = ""
has_paused_ = 0
paused_ = 0
has_retry_parameters_ = 0
retry_parameters_ = None
has_max_concurrent_requests_ = 0
max_concurrent_requests_ = 0
has_mode_ = 0
mode_ = 0
has_acl_ = 0
acl_ = None
has_creator_name_ = 0
creator_name_ = "apphosting"
def __init__(self, contents=None):
self.header_override_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def queue_name(self): return self.queue_name_
def set_queue_name(self, x):
self.has_queue_name_ = 1
self.queue_name_ = x
def clear_queue_name(self):
if self.has_queue_name_:
self.has_queue_name_ = 0
self.queue_name_ = ""
def has_queue_name(self): return self.has_queue_name_
def bucket_refill_per_second(self): return self.bucket_refill_per_second_
def set_bucket_refill_per_second(self, x):
self.has_bucket_refill_per_second_ = 1
self.bucket_refill_per_second_ = x
def clear_bucket_refill_per_second(self):
if self.has_bucket_refill_per_second_:
self.has_bucket_refill_per_second_ = 0
self.bucket_refill_per_second_ = 0.0
def has_bucket_refill_per_second(self): return self.has_bucket_refill_per_second_
def bucket_capacity(self): return self.bucket_capacity_
def set_bucket_capacity(self, x):
self.has_bucket_capacity_ = 1
self.bucket_capacity_ = x
def clear_bucket_capacity(self):
if self.has_bucket_capacity_:
self.has_bucket_capacity_ = 0
self.bucket_capacity_ = 0.0
def has_bucket_capacity(self): return self.has_bucket_capacity_
def user_specified_rate(self): return self.user_specified_rate_
def set_user_specified_rate(self, x):
self.has_user_specified_rate_ = 1
self.user_specified_rate_ = x
def clear_user_specified_rate(self):
if self.has_user_specified_rate_:
self.has_user_specified_rate_ = 0
self.user_specified_rate_ = ""
def has_user_specified_rate(self): return self.has_user_specified_rate_
def paused(self): return self.paused_
def set_paused(self, x):
self.has_paused_ = 1
self.paused_ = x
def clear_paused(self):
if self.has_paused_:
self.has_paused_ = 0
self.paused_ = 0
def has_paused(self): return self.has_paused_
def retry_parameters(self):
if self.retry_parameters_ is None:
self.lazy_init_lock_.acquire()
try:
if self.retry_parameters_ is None: self.retry_parameters_ = TaskQueueRetryParameters()
finally:
self.lazy_init_lock_.release()
return self.retry_parameters_
def mutable_retry_parameters(self): self.has_retry_parameters_ = 1; return self.retry_parameters()
def clear_retry_parameters(self):
# Warning: this method does not acquire the lock.
if self.has_retry_parameters_:
      self.has_retry_parameters_ = 0
if self.retry_parameters_ is not None: self.retry_parameters_.Clear()
def has_retry_parameters(self): return self.has_retry_parameters_
def max_concurrent_requests(self): return self.max_concurrent_requests_
def set_max_concurrent_requests(self, x):
self.has_max_concurrent_requests_ = 1
self.max_concurrent_requests_ = x
def clear_max_concurrent_requests(self):
if self.has_max_concurrent_requests_:
self.has_max_concurrent_requests_ = 0
self.max_concurrent_requests_ = 0
def has_max_concurrent_requests(self): return self.has_max_concurrent_requests_
def mode(self): return self.mode_
def set_mode(self, x):
self.has_mode_ = 1
self.mode_ = x
def clear_mode(self):
if self.has_mode_:
self.has_mode_ = 0
self.mode_ = 0
def has_mode(self): return self.has_mode_
def acl(self):
if self.acl_ is None:
self.lazy_init_lock_.acquire()
try:
if self.acl_ is None: self.acl_ = TaskQueueAcl()
finally:
self.lazy_init_lock_.release()
return self.acl_
def mutable_acl(self): self.has_acl_ = 1; return self.acl()
def clear_acl(self):
# Warning: this method does not acquire the lock.
if self.has_acl_:
      self.has_acl_ = 0
if self.acl_ is not None: self.acl_.Clear()
def has_acl(self): return self.has_acl_
def header_override_size(self): return len(self.header_override_)
def header_override_list(self): return self.header_override_
def header_override(self, i):
return self.header_override_[i]
def mutable_header_override(self, i):
return self.header_override_[i]
def add_header_override(self):
x = TaskQueueHttpHeader()
self.header_override_.append(x)
return x
def clear_header_override(self):
self.header_override_ = []
def creator_name(self): return self.creator_name_
def set_creator_name(self, x):
self.has_creator_name_ = 1
self.creator_name_ = x
def clear_creator_name(self):
if self.has_creator_name_:
self.has_creator_name_ = 0
self.creator_name_ = "apphosting"
def has_creator_name(self): return self.has_creator_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_queue_name()): self.set_queue_name(x.queue_name())
if (x.has_bucket_refill_per_second()): self.set_bucket_refill_per_second(x.bucket_refill_per_second())
if (x.has_bucket_capacity()): self.set_bucket_capacity(x.bucket_capacity())
if (x.has_user_specified_rate()): self.set_user_specified_rate(x.user_specified_rate())
if (x.has_paused()): self.set_paused(x.paused())
if (x.has_retry_parameters()): self.mutable_retry_parameters().MergeFrom(x.retry_parameters())
if (x.has_max_concurrent_requests()): self.set_max_concurrent_requests(x.max_concurrent_requests())
if (x.has_mode()): self.set_mode(x.mode())
if (x.has_acl()): self.mutable_acl().MergeFrom(x.acl())
for i in xrange(x.header_override_size()): self.add_header_override().CopyFrom(x.header_override(i))
if (x.has_creator_name()): self.set_creator_name(x.creator_name())
def Equals(self, x):
if x is self: return 1
if self.has_queue_name_ != x.has_queue_name_: return 0
if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
if self.has_bucket_refill_per_second_ != x.has_bucket_refill_per_second_: return 0
if self.has_bucket_refill_per_second_ and self.bucket_refill_per_second_ != x.bucket_refill_per_second_: return 0
if self.has_bucket_capacity_ != x.has_bucket_capacity_: return 0
if self.has_bucket_capacity_ and self.bucket_capacity_ != x.bucket_capacity_: return 0
if self.has_user_specified_rate_ != x.has_user_specified_rate_: return 0
if self.has_user_specified_rate_ and self.user_specified_rate_ != x.user_specified_rate_: return 0
if self.has_paused_ != x.has_paused_: return 0
if self.has_paused_ and self.paused_ != x.paused_: return 0
if self.has_retry_parameters_ != x.has_retry_parameters_: return 0
if self.has_retry_parameters_ and self.retry_parameters_ != x.retry_parameters_: return 0
if self.has_max_concurrent_requests_ != x.has_max_concurrent_requests_: return 0
if self.has_max_concurrent_requests_ and self.max_concurrent_requests_ != x.max_concurrent_requests_: return 0
if self.has_mode_ != x.has_mode_: return 0
if self.has_mode_ and self.mode_ != x.mode_: return 0
if self.has_acl_ != x.has_acl_: return 0
if self.has_acl_ and self.acl_ != x.acl_: return 0
if len(self.header_override_) != len(x.header_override_): return 0
for e1, e2 in zip(self.header_override_, x.header_override_):
if e1 != e2: return 0
if self.has_creator_name_ != x.has_creator_name_: return 0
if self.has_creator_name_ and self.creator_name_ != x.creator_name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_queue_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: queue_name not set.')
if (not self.has_bucket_refill_per_second_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: bucket_refill_per_second not set.')
if (not self.has_bucket_capacity_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: bucket_capacity not set.')
if (not self.has_paused_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: paused not set.')
if (self.has_retry_parameters_ and not self.retry_parameters_.IsInitialized(debug_strs)): initialized = 0
if (self.has_acl_ and not self.acl_.IsInitialized(debug_strs)): initialized = 0
for p in self.header_override_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.queue_name_))
if (self.has_user_specified_rate_): n += 1 + self.lengthString(len(self.user_specified_rate_))
if (self.has_retry_parameters_): n += 1 + self.lengthString(self.retry_parameters_.ByteSize())
if (self.has_max_concurrent_requests_): n += 1 + self.lengthVarInt64(self.max_concurrent_requests_)
if (self.has_mode_): n += 1 + self.lengthVarInt64(self.mode_)
if (self.has_acl_): n += 1 + self.lengthString(self.acl_.ByteSize())
n += 1 * len(self.header_override_)
for i in xrange(len(self.header_override_)): n += self.lengthString(self.header_override_[i].ByteSize())
if (self.has_creator_name_): n += 1 + self.lengthString(len(self.creator_name_))
return n + 21
def ByteSizePartial(self):
n = 0
if (self.has_queue_name_):
n += 1
n += self.lengthString(len(self.queue_name_))
if (self.has_bucket_refill_per_second_):
n += 9
if (self.has_bucket_capacity_):
n += 9
if (self.has_user_specified_rate_): n += 1 + self.lengthString(len(self.user_specified_rate_))
if (self.has_paused_):
n += 2
if (self.has_retry_parameters_): n += 1 + self.lengthString(self.retry_parameters_.ByteSizePartial())
if (self.has_max_concurrent_requests_): n += 1 + self.lengthVarInt64(self.max_concurrent_requests_)
if (self.has_mode_): n += 1 + self.lengthVarInt64(self.mode_)
if (self.has_acl_): n += 1 + self.lengthString(self.acl_.ByteSizePartial())
n += 1 * len(self.header_override_)
for i in xrange(len(self.header_override_)): n += self.lengthString(self.header_override_[i].ByteSizePartial())
if (self.has_creator_name_): n += 1 + self.lengthString(len(self.creator_name_))
return n
def Clear(self):
self.clear_queue_name()
self.clear_bucket_refill_per_second()
self.clear_bucket_capacity()
self.clear_user_specified_rate()
self.clear_paused()
self.clear_retry_parameters()
self.clear_max_concurrent_requests()
self.clear_mode()
self.clear_acl()
self.clear_header_override()
self.clear_creator_name()
def OutputUnchecked(self, out):
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
out.putVarInt32(25)
out.putDouble(self.bucket_refill_per_second_)
out.putVarInt32(33)
out.putDouble(self.bucket_capacity_)
if (self.has_user_specified_rate_):
out.putVarInt32(42)
out.putPrefixedString(self.user_specified_rate_)
out.putVarInt32(48)
out.putBoolean(self.paused_)
if (self.has_retry_parameters_):
out.putVarInt32(58)
out.putVarInt32(self.retry_parameters_.ByteSize())
self.retry_parameters_.OutputUnchecked(out)
if (self.has_max_concurrent_requests_):
out.putVarInt32(64)
out.putVarInt32(self.max_concurrent_requests_)
if (self.has_mode_):
out.putVarInt32(72)
out.putVarInt32(self.mode_)
if (self.has_acl_):
out.putVarInt32(82)
out.putVarInt32(self.acl_.ByteSize())
self.acl_.OutputUnchecked(out)
for i in xrange(len(self.header_override_)):
out.putVarInt32(90)
out.putVarInt32(self.header_override_[i].ByteSize())
self.header_override_[i].OutputUnchecked(out)
if (self.has_creator_name_):
out.putVarInt32(98)
out.putPrefixedString(self.creator_name_)
def OutputPartial(self, out):
if (self.has_queue_name_):
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
if (self.has_bucket_refill_per_second_):
out.putVarInt32(25)
out.putDouble(self.bucket_refill_per_second_)
if (self.has_bucket_capacity_):
out.putVarInt32(33)
out.putDouble(self.bucket_capacity_)
if (self.has_user_specified_rate_):
out.putVarInt32(42)
out.putPrefixedString(self.user_specified_rate_)
if (self.has_paused_):
out.putVarInt32(48)
out.putBoolean(self.paused_)
if (self.has_retry_parameters_):
out.putVarInt32(58)
out.putVarInt32(self.retry_parameters_.ByteSizePartial())
self.retry_parameters_.OutputPartial(out)
if (self.has_max_concurrent_requests_):
out.putVarInt32(64)
out.putVarInt32(self.max_concurrent_requests_)
if (self.has_mode_):
out.putVarInt32(72)
out.putVarInt32(self.mode_)
if (self.has_acl_):
out.putVarInt32(82)
out.putVarInt32(self.acl_.ByteSizePartial())
self.acl_.OutputPartial(out)
for i in xrange(len(self.header_override_)):
out.putVarInt32(90)
out.putVarInt32(self.header_override_[i].ByteSizePartial())
self.header_override_[i].OutputPartial(out)
if (self.has_creator_name_):
out.putVarInt32(98)
out.putPrefixedString(self.creator_name_)
def TryMerge(self, d):
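    # Group decoding: consume fields until tag 12, the ENDGROUP marker for
    # field number 1 ((1 << 3) | 4 == 12).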
while 1:
tt = d.getVarInt32()
if tt == 12: break
if tt == 18:
self.set_queue_name(d.getPrefixedString())
continue
if tt == 25:
self.set_bucket_refill_per_second(d.getDouble())
continue
if tt == 33:
self.set_bucket_capacity(d.getDouble())
continue
if tt == 42:
self.set_user_specified_rate(d.getPrefixedString())
continue
if tt == 48:
self.set_paused(d.getBoolean())
continue
if tt == 58:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_retry_parameters().TryMerge(tmp)
continue
if tt == 64:
self.set_max_concurrent_requests(d.getVarInt32())
continue
if tt == 72:
self.set_mode(d.getVarInt32())
continue
if tt == 82:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_acl().TryMerge(tmp)
continue
if tt == 90:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_header_override().TryMerge(tmp)
continue
if tt == 98:
self.set_creator_name(d.getPrefixedString())
continue
      # Tag 0 is special: it indicates an error, so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
if self.has_bucket_refill_per_second_: res+=prefix+("bucket_refill_per_second: %s\n" % self.DebugFormat(self.bucket_refill_per_second_))
if self.has_bucket_capacity_: res+=prefix+("bucket_capacity: %s\n" % self.DebugFormat(self.bucket_capacity_))
if self.has_user_specified_rate_: res+=prefix+("user_specified_rate: %s\n" % self.DebugFormatString(self.user_specified_rate_))
if self.has_paused_: res+=prefix+("paused: %s\n" % self.DebugFormatBool(self.paused_))
if self.has_retry_parameters_:
res+=prefix+"retry_parameters <\n"
res+=self.retry_parameters_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_max_concurrent_requests_: res+=prefix+("max_concurrent_requests: %s\n" % self.DebugFormatInt32(self.max_concurrent_requests_))
if self.has_mode_: res+=prefix+("mode: %s\n" % self.DebugFormatInt32(self.mode_))
if self.has_acl_:
res+=prefix+"acl <\n"
res+=self.acl_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.header_override_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("header_override%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_creator_name_: res+=prefix+("creator_name: %s\n" % self.DebugFormatString(self.creator_name_))
return res
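# Response to TaskQueueFetchQueuesRequest: a repeated Queue group, one entry
# per fetched queue definition.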
class TaskQueueFetchQueuesResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.queue_ = []
if contents is not None: self.MergeFromString(contents)
def queue_size(self): return len(self.queue_)
def queue_list(self): return self.queue_
def queue(self, i):
return self.queue_[i]
def mutable_queue(self, i):
return self.queue_[i]
def add_queue(self):
x = TaskQueueFetchQueuesResponse_Queue()
self.queue_.append(x)
return x
def clear_queue(self):
self.queue_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.queue_size()): self.add_queue().CopyFrom(x.queue(i))
def Equals(self, x):
if x is self: return 1
if len(self.queue_) != len(x.queue_): return 0
for e1, e2 in zip(self.queue_, x.queue_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.queue_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 2 * len(self.queue_)
for i in xrange(len(self.queue_)): n += self.queue_[i].ByteSize()
return n
def ByteSizePartial(self):
n = 0
n += 2 * len(self.queue_)
for i in xrange(len(self.queue_)): n += self.queue_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_queue()
def OutputUnchecked(self, out):
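    # Each Queue is framed as a group: tag 11 is STARTGROUP and tag 12 is
    # ENDGROUP for field number 1 ((1 << 3) | 3 and (1 << 3) | 4).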
for i in xrange(len(self.queue_)):
out.putVarInt32(11)
self.queue_[i].OutputUnchecked(out)
out.putVarInt32(12)
def OutputPartial(self, out):
for i in xrange(len(self.queue_)):
out.putVarInt32(11)
self.queue_[i].OutputPartial(out)
out.putVarInt32(12)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 11:
self.add_queue().TryMerge(d)
continue
      # Tag 0 is special: it indicates an error, so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.queue_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Queue%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kQueueGroup = 1
kQueuequeue_name = 2
kQueuebucket_refill_per_second = 3
kQueuebucket_capacity = 4
kQueueuser_specified_rate = 5
kQueuepaused = 6
kQueueretry_parameters = 7
kQueuemax_concurrent_requests = 8
kQueuemode = 9
kQueueacl = 10
kQueueheader_override = 11
kQueuecreator_name = 12
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "Queue",
2: "queue_name",
3: "bucket_refill_per_second",
4: "bucket_capacity",
5: "user_specified_rate",
6: "paused",
7: "retry_parameters",
8: "max_concurrent_requests",
9: "mode",
10: "acl",
11: "header_override",
12: "creator_name",
}, 12)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STARTGROUP,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.DOUBLE,
4: ProtocolBuffer.Encoder.DOUBLE,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.STRING,
8: ProtocolBuffer.Encoder.NUMERIC,
9: ProtocolBuffer.Encoder.NUMERIC,
10: ProtocolBuffer.Encoder.STRING,
11: ProtocolBuffer.Encoder.STRING,
12: ProtocolBuffer.Encoder.STRING,
}, 12, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueFetchQueuesResponse'
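# Requests execution statistics for the named queues. app_id and
# max_num_tasks are optional; max_num_tasks presumably caps how many tasks
# are examined per queue when the stats are computed.
#
# Sketch (values are illustrative):
#
#   req = TaskQueueFetchQueueStatsRequest()
#   req.add_queue_name('default')
#   req.set_max_num_tasks(100)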
class TaskQueueFetchQueueStatsRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_max_num_tasks_ = 0
max_num_tasks_ = 0
def __init__(self, contents=None):
self.queue_name_ = []
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def queue_name_size(self): return len(self.queue_name_)
def queue_name_list(self): return self.queue_name_
def queue_name(self, i):
return self.queue_name_[i]
def set_queue_name(self, i, x):
self.queue_name_[i] = x
def add_queue_name(self, x):
self.queue_name_.append(x)
def clear_queue_name(self):
self.queue_name_ = []
def max_num_tasks(self): return self.max_num_tasks_
def set_max_num_tasks(self, x):
self.has_max_num_tasks_ = 1
self.max_num_tasks_ = x
def clear_max_num_tasks(self):
if self.has_max_num_tasks_:
self.has_max_num_tasks_ = 0
self.max_num_tasks_ = 0
def has_max_num_tasks(self): return self.has_max_num_tasks_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
for i in xrange(x.queue_name_size()): self.add_queue_name(x.queue_name(i))
if (x.has_max_num_tasks()): self.set_max_num_tasks(x.max_num_tasks())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if len(self.queue_name_) != len(x.queue_name_): return 0
for e1, e2 in zip(self.queue_name_, x.queue_name_):
if e1 != e2: return 0
if self.has_max_num_tasks_ != x.has_max_num_tasks_: return 0
if self.has_max_num_tasks_ and self.max_num_tasks_ != x.max_num_tasks_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
n += 1 * len(self.queue_name_)
for i in xrange(len(self.queue_name_)): n += self.lengthString(len(self.queue_name_[i]))
if (self.has_max_num_tasks_): n += 1 + self.lengthVarInt64(self.max_num_tasks_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
n += 1 * len(self.queue_name_)
for i in xrange(len(self.queue_name_)): n += self.lengthString(len(self.queue_name_[i]))
if (self.has_max_num_tasks_): n += 1 + self.lengthVarInt64(self.max_num_tasks_)
return n
def Clear(self):
self.clear_app_id()
self.clear_queue_name()
self.clear_max_num_tasks()
def OutputUnchecked(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
for i in xrange(len(self.queue_name_)):
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_[i])
if (self.has_max_num_tasks_):
out.putVarInt32(24)
out.putVarInt32(self.max_num_tasks_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
for i in xrange(len(self.queue_name_)):
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_[i])
if (self.has_max_num_tasks_):
out.putVarInt32(24)
out.putVarInt32(self.max_num_tasks_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 18:
self.add_queue_name(d.getPrefixedString())
continue
if tt == 24:
self.set_max_num_tasks(d.getVarInt32())
continue
      # Tag 0 is special: it indicates an error, so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
cnt=0
for e in self.queue_name_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("queue_name%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
if self.has_max_num_tasks_: res+=prefix+("max_num_tasks: %s\n" % self.DebugFormatInt32(self.max_num_tasks_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kqueue_name = 2
kmax_num_tasks = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "queue_name",
3: "max_num_tasks",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueFetchQueueStatsRequest'
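# Scanner-level statistics for one queue: tasks executed over the last minute
# and hour, the sampling window in seconds, plus optional counts of requests
# in flight and the rate currently being enforced.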
class TaskQueueScannerQueueInfo(ProtocolBuffer.ProtocolMessage):
has_executed_last_minute_ = 0
executed_last_minute_ = 0
has_executed_last_hour_ = 0
executed_last_hour_ = 0
has_sampling_duration_seconds_ = 0
sampling_duration_seconds_ = 0.0
has_requests_in_flight_ = 0
requests_in_flight_ = 0
has_enforced_rate_ = 0
enforced_rate_ = 0.0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def executed_last_minute(self): return self.executed_last_minute_
def set_executed_last_minute(self, x):
self.has_executed_last_minute_ = 1
self.executed_last_minute_ = x
def clear_executed_last_minute(self):
if self.has_executed_last_minute_:
self.has_executed_last_minute_ = 0
self.executed_last_minute_ = 0
def has_executed_last_minute(self): return self.has_executed_last_minute_
def executed_last_hour(self): return self.executed_last_hour_
def set_executed_last_hour(self, x):
self.has_executed_last_hour_ = 1
self.executed_last_hour_ = x
def clear_executed_last_hour(self):
if self.has_executed_last_hour_:
self.has_executed_last_hour_ = 0
self.executed_last_hour_ = 0
def has_executed_last_hour(self): return self.has_executed_last_hour_
def sampling_duration_seconds(self): return self.sampling_duration_seconds_
def set_sampling_duration_seconds(self, x):
self.has_sampling_duration_seconds_ = 1
self.sampling_duration_seconds_ = x
def clear_sampling_duration_seconds(self):
if self.has_sampling_duration_seconds_:
self.has_sampling_duration_seconds_ = 0
self.sampling_duration_seconds_ = 0.0
def has_sampling_duration_seconds(self): return self.has_sampling_duration_seconds_
def requests_in_flight(self): return self.requests_in_flight_
def set_requests_in_flight(self, x):
self.has_requests_in_flight_ = 1
self.requests_in_flight_ = x
def clear_requests_in_flight(self):
if self.has_requests_in_flight_:
self.has_requests_in_flight_ = 0
self.requests_in_flight_ = 0
def has_requests_in_flight(self): return self.has_requests_in_flight_
def enforced_rate(self): return self.enforced_rate_
def set_enforced_rate(self, x):
self.has_enforced_rate_ = 1
self.enforced_rate_ = x
def clear_enforced_rate(self):
if self.has_enforced_rate_:
self.has_enforced_rate_ = 0
self.enforced_rate_ = 0.0
def has_enforced_rate(self): return self.has_enforced_rate_
def MergeFrom(self, x):
assert x is not self
if (x.has_executed_last_minute()): self.set_executed_last_minute(x.executed_last_minute())
if (x.has_executed_last_hour()): self.set_executed_last_hour(x.executed_last_hour())
if (x.has_sampling_duration_seconds()): self.set_sampling_duration_seconds(x.sampling_duration_seconds())
if (x.has_requests_in_flight()): self.set_requests_in_flight(x.requests_in_flight())
if (x.has_enforced_rate()): self.set_enforced_rate(x.enforced_rate())
def Equals(self, x):
if x is self: return 1
if self.has_executed_last_minute_ != x.has_executed_last_minute_: return 0
if self.has_executed_last_minute_ and self.executed_last_minute_ != x.executed_last_minute_: return 0
if self.has_executed_last_hour_ != x.has_executed_last_hour_: return 0
if self.has_executed_last_hour_ and self.executed_last_hour_ != x.executed_last_hour_: return 0
if self.has_sampling_duration_seconds_ != x.has_sampling_duration_seconds_: return 0
if self.has_sampling_duration_seconds_ and self.sampling_duration_seconds_ != x.sampling_duration_seconds_: return 0
if self.has_requests_in_flight_ != x.has_requests_in_flight_: return 0
if self.has_requests_in_flight_ and self.requests_in_flight_ != x.requests_in_flight_: return 0
if self.has_enforced_rate_ != x.has_enforced_rate_: return 0
if self.has_enforced_rate_ and self.enforced_rate_ != x.enforced_rate_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_executed_last_minute_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: executed_last_minute not set.')
if (not self.has_executed_last_hour_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: executed_last_hour not set.')
if (not self.has_sampling_duration_seconds_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: sampling_duration_seconds not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.executed_last_minute_)
n += self.lengthVarInt64(self.executed_last_hour_)
if (self.has_requests_in_flight_): n += 1 + self.lengthVarInt64(self.requests_in_flight_)
if (self.has_enforced_rate_): n += 9
return n + 11
def ByteSizePartial(self):
n = 0
if (self.has_executed_last_minute_):
n += 1
n += self.lengthVarInt64(self.executed_last_minute_)
if (self.has_executed_last_hour_):
n += 1
n += self.lengthVarInt64(self.executed_last_hour_)
if (self.has_sampling_duration_seconds_):
n += 9
if (self.has_requests_in_flight_): n += 1 + self.lengthVarInt64(self.requests_in_flight_)
if (self.has_enforced_rate_): n += 9
return n
def Clear(self):
self.clear_executed_last_minute()
self.clear_executed_last_hour()
self.clear_sampling_duration_seconds()
self.clear_requests_in_flight()
self.clear_enforced_rate()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.executed_last_minute_)
out.putVarInt32(16)
out.putVarInt64(self.executed_last_hour_)
out.putVarInt32(25)
out.putDouble(self.sampling_duration_seconds_)
if (self.has_requests_in_flight_):
out.putVarInt32(32)
out.putVarInt32(self.requests_in_flight_)
if (self.has_enforced_rate_):
out.putVarInt32(41)
out.putDouble(self.enforced_rate_)
def OutputPartial(self, out):
if (self.has_executed_last_minute_):
out.putVarInt32(8)
out.putVarInt64(self.executed_last_minute_)
if (self.has_executed_last_hour_):
out.putVarInt32(16)
out.putVarInt64(self.executed_last_hour_)
if (self.has_sampling_duration_seconds_):
out.putVarInt32(25)
out.putDouble(self.sampling_duration_seconds_)
if (self.has_requests_in_flight_):
out.putVarInt32(32)
out.putVarInt32(self.requests_in_flight_)
if (self.has_enforced_rate_):
out.putVarInt32(41)
out.putDouble(self.enforced_rate_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_executed_last_minute(d.getVarInt64())
continue
if tt == 16:
self.set_executed_last_hour(d.getVarInt64())
continue
if tt == 25:
self.set_sampling_duration_seconds(d.getDouble())
continue
if tt == 32:
self.set_requests_in_flight(d.getVarInt32())
continue
if tt == 41:
self.set_enforced_rate(d.getDouble())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_executed_last_minute_: res+=prefix+("executed_last_minute: %s\n" % self.DebugFormatInt64(self.executed_last_minute_))
if self.has_executed_last_hour_: res+=prefix+("executed_last_hour: %s\n" % self.DebugFormatInt64(self.executed_last_hour_))
if self.has_sampling_duration_seconds_: res+=prefix+("sampling_duration_seconds: %s\n" % self.DebugFormat(self.sampling_duration_seconds_))
if self.has_requests_in_flight_: res+=prefix+("requests_in_flight: %s\n" % self.DebugFormatInt32(self.requests_in_flight_))
if self.has_enforced_rate_: res+=prefix+("enforced_rate: %s\n" % self.DebugFormat(self.enforced_rate_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kexecuted_last_minute = 1
kexecuted_last_hour = 2
ksampling_duration_seconds = 3
krequests_in_flight = 4
kenforced_rate = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "executed_last_minute",
2: "executed_last_hour",
3: "sampling_duration_seconds",
4: "requests_in_flight",
5: "enforced_rate",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.DOUBLE,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.DOUBLE,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueScannerQueueInfo'
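# Illustrative sketch (not part of the generated message definitions): build a
# TaskQueueScannerQueueInfo by hand and round-trip it through the wire format.
# Encode() and the contents-accepting constructor come from the
# ProtocolBuffer.ProtocolMessage base class; the three set_* calls cover all
# of the required fields.
def _example_scanner_queue_info():
  info = TaskQueueScannerQueueInfo()
  info.set_executed_last_minute(12)
  info.set_executed_last_hour(340)
  info.set_sampling_duration_seconds(60.0)
  assert info.IsInitialized()
  decoded = TaskQueueScannerQueueInfo(info.Encode())
  assert decoded.Equals(info)
  return decoded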
class TaskQueueFetchQueueStatsResponse_QueueStats(ProtocolBuffer.ProtocolMessage):
has_num_tasks_ = 0
num_tasks_ = 0
has_oldest_eta_usec_ = 0
oldest_eta_usec_ = 0
has_scanner_info_ = 0
scanner_info_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def num_tasks(self): return self.num_tasks_
def set_num_tasks(self, x):
self.has_num_tasks_ = 1
self.num_tasks_ = x
def clear_num_tasks(self):
if self.has_num_tasks_:
self.has_num_tasks_ = 0
self.num_tasks_ = 0
def has_num_tasks(self): return self.has_num_tasks_
def oldest_eta_usec(self): return self.oldest_eta_usec_
def set_oldest_eta_usec(self, x):
self.has_oldest_eta_usec_ = 1
self.oldest_eta_usec_ = x
def clear_oldest_eta_usec(self):
if self.has_oldest_eta_usec_:
self.has_oldest_eta_usec_ = 0
self.oldest_eta_usec_ = 0
def has_oldest_eta_usec(self): return self.has_oldest_eta_usec_
def scanner_info(self):
if self.scanner_info_ is None:
self.lazy_init_lock_.acquire()
try:
if self.scanner_info_ is None: self.scanner_info_ = TaskQueueScannerQueueInfo()
finally:
self.lazy_init_lock_.release()
return self.scanner_info_
def mutable_scanner_info(self): self.has_scanner_info_ = 1; return self.scanner_info()
def clear_scanner_info(self):
# Warning: this method does not acquire the lock.
if self.has_scanner_info_:
      self.has_scanner_info_ = 0
if self.scanner_info_ is not None: self.scanner_info_.Clear()
def has_scanner_info(self): return self.has_scanner_info_
def MergeFrom(self, x):
assert x is not self
if (x.has_num_tasks()): self.set_num_tasks(x.num_tasks())
if (x.has_oldest_eta_usec()): self.set_oldest_eta_usec(x.oldest_eta_usec())
if (x.has_scanner_info()): self.mutable_scanner_info().MergeFrom(x.scanner_info())
def Equals(self, x):
if x is self: return 1
if self.has_num_tasks_ != x.has_num_tasks_: return 0
if self.has_num_tasks_ and self.num_tasks_ != x.num_tasks_: return 0
if self.has_oldest_eta_usec_ != x.has_oldest_eta_usec_: return 0
if self.has_oldest_eta_usec_ and self.oldest_eta_usec_ != x.oldest_eta_usec_: return 0
if self.has_scanner_info_ != x.has_scanner_info_: return 0
if self.has_scanner_info_ and self.scanner_info_ != x.scanner_info_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_num_tasks_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: num_tasks not set.')
if (not self.has_oldest_eta_usec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: oldest_eta_usec not set.')
if (self.has_scanner_info_ and not self.scanner_info_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.num_tasks_)
n += self.lengthVarInt64(self.oldest_eta_usec_)
if (self.has_scanner_info_): n += 1 + self.lengthString(self.scanner_info_.ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_num_tasks_):
n += 1
n += self.lengthVarInt64(self.num_tasks_)
if (self.has_oldest_eta_usec_):
n += 1
n += self.lengthVarInt64(self.oldest_eta_usec_)
if (self.has_scanner_info_): n += 1 + self.lengthString(self.scanner_info_.ByteSizePartial())
return n
def Clear(self):
self.clear_num_tasks()
self.clear_oldest_eta_usec()
self.clear_scanner_info()
def OutputUnchecked(self, out):
out.putVarInt32(16)
out.putVarInt32(self.num_tasks_)
out.putVarInt32(24)
out.putVarInt64(self.oldest_eta_usec_)
if (self.has_scanner_info_):
out.putVarInt32(34)
out.putVarInt32(self.scanner_info_.ByteSize())
self.scanner_info_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_num_tasks_):
out.putVarInt32(16)
out.putVarInt32(self.num_tasks_)
if (self.has_oldest_eta_usec_):
out.putVarInt32(24)
out.putVarInt64(self.oldest_eta_usec_)
if (self.has_scanner_info_):
out.putVarInt32(34)
out.putVarInt32(self.scanner_info_.ByteSizePartial())
self.scanner_info_.OutputPartial(out)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 12: break
if tt == 16:
self.set_num_tasks(d.getVarInt32())
continue
if tt == 24:
self.set_oldest_eta_usec(d.getVarInt64())
continue
if tt == 34:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_scanner_info().TryMerge(tmp)
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_num_tasks_: res+=prefix+("num_tasks: %s\n" % self.DebugFormatInt32(self.num_tasks_))
if self.has_oldest_eta_usec_: res+=prefix+("oldest_eta_usec: %s\n" % self.DebugFormatInt64(self.oldest_eta_usec_))
if self.has_scanner_info_:
res+=prefix+"scanner_info <\n"
res+=self.scanner_info_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
class TaskQueueFetchQueueStatsResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.queuestats_ = []
if contents is not None: self.MergeFromString(contents)
def queuestats_size(self): return len(self.queuestats_)
def queuestats_list(self): return self.queuestats_
def queuestats(self, i):
return self.queuestats_[i]
def mutable_queuestats(self, i):
return self.queuestats_[i]
def add_queuestats(self):
x = TaskQueueFetchQueueStatsResponse_QueueStats()
self.queuestats_.append(x)
return x
def clear_queuestats(self):
self.queuestats_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.queuestats_size()): self.add_queuestats().CopyFrom(x.queuestats(i))
def Equals(self, x):
if x is self: return 1
if len(self.queuestats_) != len(x.queuestats_): return 0
for e1, e2 in zip(self.queuestats_, x.queuestats_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.queuestats_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 2 * len(self.queuestats_)
for i in xrange(len(self.queuestats_)): n += self.queuestats_[i].ByteSize()
return n
def ByteSizePartial(self):
n = 0
n += 2 * len(self.queuestats_)
for i in xrange(len(self.queuestats_)): n += self.queuestats_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_queuestats()
def OutputUnchecked(self, out):
for i in xrange(len(self.queuestats_)):
out.putVarInt32(11)
self.queuestats_[i].OutputUnchecked(out)
out.putVarInt32(12)
def OutputPartial(self, out):
for i in xrange(len(self.queuestats_)):
out.putVarInt32(11)
self.queuestats_[i].OutputPartial(out)
out.putVarInt32(12)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 11:
self.add_queuestats().TryMerge(d)
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.queuestats_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("QueueStats%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kQueueStatsGroup = 1
kQueueStatsnum_tasks = 2
kQueueStatsoldest_eta_usec = 3
kQueueStatsscanner_info = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "QueueStats",
2: "num_tasks",
3: "oldest_eta_usec",
4: "scanner_info",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STARTGROUP,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueFetchQueueStatsResponse'
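# Illustrative sketch (the `response` argument is assumed to be a parsed
# TaskQueueFetchQueueStatsResponse): walk the repeated QueueStats group and
# collect the per-queue counters.  Both fields read here are required, so a
# successfully parsed response always carries them.
def _example_collect_queue_stats(response):
  stats = []
  for i in xrange(response.queuestats_size()):
    qs = response.queuestats(i)
    stats.append((qs.num_tasks(), qs.oldest_eta_usec()))
  return stats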
class TaskQueuePauseQueueRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_queue_name_ = 0
queue_name_ = ""
has_pause_ = 0
pause_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def queue_name(self): return self.queue_name_
def set_queue_name(self, x):
self.has_queue_name_ = 1
self.queue_name_ = x
def clear_queue_name(self):
if self.has_queue_name_:
self.has_queue_name_ = 0
self.queue_name_ = ""
def has_queue_name(self): return self.has_queue_name_
def pause(self): return self.pause_
def set_pause(self, x):
self.has_pause_ = 1
self.pause_ = x
def clear_pause(self):
if self.has_pause_:
self.has_pause_ = 0
self.pause_ = 0
def has_pause(self): return self.has_pause_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_queue_name()): self.set_queue_name(x.queue_name())
if (x.has_pause()): self.set_pause(x.pause())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_queue_name_ != x.has_queue_name_: return 0
if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
if self.has_pause_ != x.has_pause_: return 0
if self.has_pause_ and self.pause_ != x.pause_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app_id not set.')
if (not self.has_queue_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: queue_name not set.')
if (not self.has_pause_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: pause not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_id_))
n += self.lengthString(len(self.queue_name_))
return n + 4
def ByteSizePartial(self):
n = 0
if (self.has_app_id_):
n += 1
n += self.lengthString(len(self.app_id_))
if (self.has_queue_name_):
n += 1
n += self.lengthString(len(self.queue_name_))
if (self.has_pause_):
n += 2
return n
def Clear(self):
self.clear_app_id()
self.clear_queue_name()
self.clear_pause()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
out.putVarInt32(24)
out.putBoolean(self.pause_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
if (self.has_queue_name_):
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
if (self.has_pause_):
out.putVarInt32(24)
out.putBoolean(self.pause_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 18:
self.set_queue_name(d.getPrefixedString())
continue
if tt == 24:
self.set_pause(d.getBoolean())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
if self.has_pause_: res+=prefix+("pause: %s\n" % self.DebugFormatBool(self.pause_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kqueue_name = 2
kpause = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "queue_name",
3: "pause",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueuePauseQueueRequest'
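# Illustrative sketch: populate a TaskQueuePauseQueueRequest.  All three
# fields are required; by the field name, pause=True presumably pauses the
# queue and pause=False resumes it (the actual semantics live in the server).
def _example_pause_queue_request(app_id, queue_name, pause=True):
  req = TaskQueuePauseQueueRequest()
  req.set_app_id(app_id)
  req.set_queue_name(queue_name)
  req.set_pause(pause)
  assert req.IsInitialized()
  return req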
class TaskQueuePauseQueueResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueuePauseQueueResponse'
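# Note: TaskQueuePauseQueueResponse above, like the purge/delete responses
# below, carries no fields at all; the RPC status is presumably the only
# signal of success or failure.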
class TaskQueuePurgeQueueRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_queue_name_ = 0
queue_name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def queue_name(self): return self.queue_name_
def set_queue_name(self, x):
self.has_queue_name_ = 1
self.queue_name_ = x
def clear_queue_name(self):
if self.has_queue_name_:
self.has_queue_name_ = 0
self.queue_name_ = ""
def has_queue_name(self): return self.has_queue_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_queue_name()): self.set_queue_name(x.queue_name())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_queue_name_ != x.has_queue_name_: return 0
if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_queue_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: queue_name not set.')
return initialized
def ByteSize(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
n += self.lengthString(len(self.queue_name_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
if (self.has_queue_name_):
n += 1
n += self.lengthString(len(self.queue_name_))
return n
def Clear(self):
self.clear_app_id()
self.clear_queue_name()
def OutputUnchecked(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
if (self.has_queue_name_):
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 18:
self.set_queue_name(d.getPrefixedString())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kqueue_name = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "queue_name",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueuePurgeQueueRequest'
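# Illustrative sketch: a purge request.  Unlike the pause request above,
# app_id is optional here (see IsInitialized), so it is only set when the
# caller provides one.
def _example_purge_queue_request(queue_name, app_id=None):
  req = TaskQueuePurgeQueueRequest()
  if app_id is not None:
    req.set_app_id(app_id)
  req.set_queue_name(queue_name)
  assert req.IsInitialized()
  return req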
class TaskQueuePurgeQueueResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueuePurgeQueueResponse'
class TaskQueueDeleteQueueRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_queue_name_ = 0
queue_name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def queue_name(self): return self.queue_name_
def set_queue_name(self, x):
self.has_queue_name_ = 1
self.queue_name_ = x
def clear_queue_name(self):
if self.has_queue_name_:
self.has_queue_name_ = 0
self.queue_name_ = ""
def has_queue_name(self): return self.has_queue_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_queue_name()): self.set_queue_name(x.queue_name())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_queue_name_ != x.has_queue_name_: return 0
if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app_id not set.')
if (not self.has_queue_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: queue_name not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_id_))
n += self.lengthString(len(self.queue_name_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_app_id_):
n += 1
n += self.lengthString(len(self.app_id_))
if (self.has_queue_name_):
n += 1
n += self.lengthString(len(self.queue_name_))
return n
def Clear(self):
self.clear_app_id()
self.clear_queue_name()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
if (self.has_queue_name_):
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 18:
self.set_queue_name(d.getPrefixedString())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kqueue_name = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "queue_name",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueDeleteQueueRequest'
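# Note: TaskQueueDeleteQueueRequest requires both app_id and queue_name,
# whereas the purge request above treats app_id as optional; deleting
# removes the queue itself, while purging presumably only drops its tasks.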
class TaskQueueDeleteQueueResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueDeleteQueueResponse'
class TaskQueueDeleteGroupRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app_id not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_id_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_app_id_):
n += 1
n += self.lengthString(len(self.app_id_))
return n
def Clear(self):
self.clear_app_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueDeleteGroupRequest'
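# Note: TaskQueueDeleteGroupRequest carries only an app_id; it presumably
# removes every queue belonging to that application's queue group in one
# call.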
class TaskQueueDeleteGroupResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueDeleteGroupResponse'
class TaskQueueQueryTasksRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_queue_name_ = 0
queue_name_ = ""
has_start_task_name_ = 0
start_task_name_ = ""
has_start_eta_usec_ = 0
start_eta_usec_ = 0
has_start_tag_ = 0
start_tag_ = ""
has_max_rows_ = 0
max_rows_ = 1
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def queue_name(self): return self.queue_name_
def set_queue_name(self, x):
self.has_queue_name_ = 1
self.queue_name_ = x
def clear_queue_name(self):
if self.has_queue_name_:
self.has_queue_name_ = 0
self.queue_name_ = ""
def has_queue_name(self): return self.has_queue_name_
def start_task_name(self): return self.start_task_name_
def set_start_task_name(self, x):
self.has_start_task_name_ = 1
self.start_task_name_ = x
def clear_start_task_name(self):
if self.has_start_task_name_:
self.has_start_task_name_ = 0
self.start_task_name_ = ""
def has_start_task_name(self): return self.has_start_task_name_
def start_eta_usec(self): return self.start_eta_usec_
def set_start_eta_usec(self, x):
self.has_start_eta_usec_ = 1
self.start_eta_usec_ = x
def clear_start_eta_usec(self):
if self.has_start_eta_usec_:
self.has_start_eta_usec_ = 0
self.start_eta_usec_ = 0
def has_start_eta_usec(self): return self.has_start_eta_usec_
def start_tag(self): return self.start_tag_
def set_start_tag(self, x):
self.has_start_tag_ = 1
self.start_tag_ = x
def clear_start_tag(self):
if self.has_start_tag_:
self.has_start_tag_ = 0
self.start_tag_ = ""
def has_start_tag(self): return self.has_start_tag_
def max_rows(self): return self.max_rows_
def set_max_rows(self, x):
self.has_max_rows_ = 1
self.max_rows_ = x
def clear_max_rows(self):
if self.has_max_rows_:
self.has_max_rows_ = 0
self.max_rows_ = 1
def has_max_rows(self): return self.has_max_rows_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_queue_name()): self.set_queue_name(x.queue_name())
if (x.has_start_task_name()): self.set_start_task_name(x.start_task_name())
if (x.has_start_eta_usec()): self.set_start_eta_usec(x.start_eta_usec())
if (x.has_start_tag()): self.set_start_tag(x.start_tag())
if (x.has_max_rows()): self.set_max_rows(x.max_rows())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_queue_name_ != x.has_queue_name_: return 0
if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
if self.has_start_task_name_ != x.has_start_task_name_: return 0
if self.has_start_task_name_ and self.start_task_name_ != x.start_task_name_: return 0
if self.has_start_eta_usec_ != x.has_start_eta_usec_: return 0
if self.has_start_eta_usec_ and self.start_eta_usec_ != x.start_eta_usec_: return 0
if self.has_start_tag_ != x.has_start_tag_: return 0
if self.has_start_tag_ and self.start_tag_ != x.start_tag_: return 0
if self.has_max_rows_ != x.has_max_rows_: return 0
if self.has_max_rows_ and self.max_rows_ != x.max_rows_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_queue_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: queue_name not set.')
return initialized
def ByteSize(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
n += self.lengthString(len(self.queue_name_))
if (self.has_start_task_name_): n += 1 + self.lengthString(len(self.start_task_name_))
if (self.has_start_eta_usec_): n += 1 + self.lengthVarInt64(self.start_eta_usec_)
if (self.has_start_tag_): n += 1 + self.lengthString(len(self.start_tag_))
if (self.has_max_rows_): n += 1 + self.lengthVarInt64(self.max_rows_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
if (self.has_queue_name_):
n += 1
n += self.lengthString(len(self.queue_name_))
if (self.has_start_task_name_): n += 1 + self.lengthString(len(self.start_task_name_))
if (self.has_start_eta_usec_): n += 1 + self.lengthVarInt64(self.start_eta_usec_)
if (self.has_start_tag_): n += 1 + self.lengthString(len(self.start_tag_))
if (self.has_max_rows_): n += 1 + self.lengthVarInt64(self.max_rows_)
return n
def Clear(self):
self.clear_app_id()
self.clear_queue_name()
self.clear_start_task_name()
self.clear_start_eta_usec()
self.clear_start_tag()
self.clear_max_rows()
def OutputUnchecked(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
if (self.has_start_task_name_):
out.putVarInt32(26)
out.putPrefixedString(self.start_task_name_)
if (self.has_start_eta_usec_):
out.putVarInt32(32)
out.putVarInt64(self.start_eta_usec_)
if (self.has_max_rows_):
out.putVarInt32(40)
out.putVarInt32(self.max_rows_)
if (self.has_start_tag_):
out.putVarInt32(50)
out.putPrefixedString(self.start_tag_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
if (self.has_queue_name_):
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
if (self.has_start_task_name_):
out.putVarInt32(26)
out.putPrefixedString(self.start_task_name_)
if (self.has_start_eta_usec_):
out.putVarInt32(32)
out.putVarInt64(self.start_eta_usec_)
if (self.has_max_rows_):
out.putVarInt32(40)
out.putVarInt32(self.max_rows_)
if (self.has_start_tag_):
out.putVarInt32(50)
out.putPrefixedString(self.start_tag_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 18:
self.set_queue_name(d.getPrefixedString())
continue
if tt == 26:
self.set_start_task_name(d.getPrefixedString())
continue
if tt == 32:
self.set_start_eta_usec(d.getVarInt64())
continue
if tt == 40:
self.set_max_rows(d.getVarInt32())
continue
if tt == 50:
self.set_start_tag(d.getPrefixedString())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
if self.has_start_task_name_: res+=prefix+("start_task_name: %s\n" % self.DebugFormatString(self.start_task_name_))
if self.has_start_eta_usec_: res+=prefix+("start_eta_usec: %s\n" % self.DebugFormatInt64(self.start_eta_usec_))
if self.has_start_tag_: res+=prefix+("start_tag: %s\n" % self.DebugFormatString(self.start_tag_))
if self.has_max_rows_: res+=prefix+("max_rows: %s\n" % self.DebugFormatInt32(self.max_rows_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kqueue_name = 2
kstart_task_name = 3
kstart_eta_usec = 4
kstart_tag = 6
kmax_rows = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "queue_name",
3: "start_task_name",
4: "start_eta_usec",
5: "max_rows",
6: "start_tag",
}, 6)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.STRING,
}, 6, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueQueryTasksRequest'
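# Illustrative sketch: page through a queue with TaskQueueQueryTasksRequest.
# start_task_name/start_eta_usec appear to act as a resume cursor, and
# max_rows defaults to 1, so a scan normally wants something larger.
def _example_query_tasks_request(queue_name, start_task_name=None,
                                 start_eta_usec=None, max_rows=100):
  req = TaskQueueQueryTasksRequest()
  req.set_queue_name(queue_name)
  if start_task_name is not None:
    req.set_start_task_name(start_task_name)
  if start_eta_usec is not None:
    req.set_start_eta_usec(start_eta_usec)
  req.set_max_rows(max_rows)
  assert req.IsInitialized()
  return req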
class TaskQueueQueryTasksResponse_TaskHeader(ProtocolBuffer.ProtocolMessage):
has_key_ = 0
key_ = ""
has_value_ = 0
value_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def key(self): return self.key_
def set_key(self, x):
self.has_key_ = 1
self.key_ = x
def clear_key(self):
if self.has_key_:
self.has_key_ = 0
self.key_ = ""
def has_key(self): return self.has_key_
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = ""
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_key()): self.set_key(x.key())
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: key not set.')
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.key_))
n += self.lengthString(len(self.value_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_key_):
n += 1
n += self.lengthString(len(self.key_))
if (self.has_value_):
n += 1
n += self.lengthString(len(self.value_))
return n
def Clear(self):
self.clear_key()
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(66)
out.putPrefixedString(self.key_)
out.putVarInt32(74)
out.putPrefixedString(self.value_)
def OutputPartial(self, out):
if (self.has_key_):
out.putVarInt32(66)
out.putPrefixedString(self.key_)
if (self.has_value_):
out.putVarInt32(74)
out.putPrefixedString(self.value_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 60: break
if tt == 66:
self.set_key(d.getPrefixedString())
continue
if tt == 74:
self.set_value(d.getPrefixedString())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_key_: res+=prefix+("key: %s\n" % self.DebugFormatString(self.key_))
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatString(self.value_))
return res
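# Note: TaskHeader is group-encoded inside the Task message below
# (START/END tags 59/60, key/value tags 66/74), which is why its TryMerge
# breaks on tag 60 instead of checking d.avail().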
class TaskQueueQueryTasksResponse_TaskCronTimetable(ProtocolBuffer.ProtocolMessage):
has_schedule_ = 0
schedule_ = ""
has_timezone_ = 0
timezone_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def schedule(self): return self.schedule_
def set_schedule(self, x):
self.has_schedule_ = 1
self.schedule_ = x
def clear_schedule(self):
if self.has_schedule_:
self.has_schedule_ = 0
self.schedule_ = ""
def has_schedule(self): return self.has_schedule_
def timezone(self): return self.timezone_
def set_timezone(self, x):
self.has_timezone_ = 1
self.timezone_ = x
def clear_timezone(self):
if self.has_timezone_:
self.has_timezone_ = 0
self.timezone_ = ""
def has_timezone(self): return self.has_timezone_
def MergeFrom(self, x):
assert x is not self
if (x.has_schedule()): self.set_schedule(x.schedule())
if (x.has_timezone()): self.set_timezone(x.timezone())
def Equals(self, x):
if x is self: return 1
if self.has_schedule_ != x.has_schedule_: return 0
if self.has_schedule_ and self.schedule_ != x.schedule_: return 0
if self.has_timezone_ != x.has_timezone_: return 0
if self.has_timezone_ and self.timezone_ != x.timezone_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_schedule_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: schedule not set.')
if (not self.has_timezone_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: timezone not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.schedule_))
n += self.lengthString(len(self.timezone_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_schedule_):
n += 1
n += self.lengthString(len(self.schedule_))
if (self.has_timezone_):
n += 1
n += self.lengthString(len(self.timezone_))
return n
def Clear(self):
self.clear_schedule()
self.clear_timezone()
def OutputUnchecked(self, out):
out.putVarInt32(114)
out.putPrefixedString(self.schedule_)
out.putVarInt32(122)
out.putPrefixedString(self.timezone_)
def OutputPartial(self, out):
if (self.has_schedule_):
out.putVarInt32(114)
out.putPrefixedString(self.schedule_)
if (self.has_timezone_):
out.putVarInt32(122)
out.putPrefixedString(self.timezone_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 108: break
if tt == 114:
self.set_schedule(d.getPrefixedString())
continue
if tt == 122:
self.set_timezone(d.getPrefixedString())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_schedule_: res+=prefix+("schedule: %s\n" % self.DebugFormatString(self.schedule_))
if self.has_timezone_: res+=prefix+("timezone: %s\n" % self.DebugFormatString(self.timezone_))
return res
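# Note: TaskCronTimetable pairs a cron schedule string with its timezone;
# both fields are required and, like TaskHeader, the message is group-encoded
# (START/END tags 107/108).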
class TaskQueueQueryTasksResponse_TaskRunLog(ProtocolBuffer.ProtocolMessage):
has_dispatched_usec_ = 0
dispatched_usec_ = 0
has_lag_usec_ = 0
lag_usec_ = 0
has_elapsed_usec_ = 0
elapsed_usec_ = 0
has_response_code_ = 0
response_code_ = 0
has_retry_reason_ = 0
retry_reason_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def dispatched_usec(self): return self.dispatched_usec_
def set_dispatched_usec(self, x):
self.has_dispatched_usec_ = 1
self.dispatched_usec_ = x
def clear_dispatched_usec(self):
if self.has_dispatched_usec_:
self.has_dispatched_usec_ = 0
self.dispatched_usec_ = 0
def has_dispatched_usec(self): return self.has_dispatched_usec_
def lag_usec(self): return self.lag_usec_
def set_lag_usec(self, x):
self.has_lag_usec_ = 1
self.lag_usec_ = x
def clear_lag_usec(self):
if self.has_lag_usec_:
self.has_lag_usec_ = 0
self.lag_usec_ = 0
def has_lag_usec(self): return self.has_lag_usec_
def elapsed_usec(self): return self.elapsed_usec_
def set_elapsed_usec(self, x):
self.has_elapsed_usec_ = 1
self.elapsed_usec_ = x
def clear_elapsed_usec(self):
if self.has_elapsed_usec_:
self.has_elapsed_usec_ = 0
self.elapsed_usec_ = 0
def has_elapsed_usec(self): return self.has_elapsed_usec_
def response_code(self): return self.response_code_
def set_response_code(self, x):
self.has_response_code_ = 1
self.response_code_ = x
def clear_response_code(self):
if self.has_response_code_:
self.has_response_code_ = 0
self.response_code_ = 0
def has_response_code(self): return self.has_response_code_
def retry_reason(self): return self.retry_reason_
def set_retry_reason(self, x):
self.has_retry_reason_ = 1
self.retry_reason_ = x
def clear_retry_reason(self):
if self.has_retry_reason_:
self.has_retry_reason_ = 0
self.retry_reason_ = ""
def has_retry_reason(self): return self.has_retry_reason_
def MergeFrom(self, x):
assert x is not self
if (x.has_dispatched_usec()): self.set_dispatched_usec(x.dispatched_usec())
if (x.has_lag_usec()): self.set_lag_usec(x.lag_usec())
if (x.has_elapsed_usec()): self.set_elapsed_usec(x.elapsed_usec())
if (x.has_response_code()): self.set_response_code(x.response_code())
if (x.has_retry_reason()): self.set_retry_reason(x.retry_reason())
def Equals(self, x):
if x is self: return 1
if self.has_dispatched_usec_ != x.has_dispatched_usec_: return 0
if self.has_dispatched_usec_ and self.dispatched_usec_ != x.dispatched_usec_: return 0
if self.has_lag_usec_ != x.has_lag_usec_: return 0
if self.has_lag_usec_ and self.lag_usec_ != x.lag_usec_: return 0
if self.has_elapsed_usec_ != x.has_elapsed_usec_: return 0
if self.has_elapsed_usec_ and self.elapsed_usec_ != x.elapsed_usec_: return 0
if self.has_response_code_ != x.has_response_code_: return 0
if self.has_response_code_ and self.response_code_ != x.response_code_: return 0
if self.has_retry_reason_ != x.has_retry_reason_: return 0
if self.has_retry_reason_ and self.retry_reason_ != x.retry_reason_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_dispatched_usec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: dispatched_usec not set.')
if (not self.has_lag_usec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: lag_usec not set.')
if (not self.has_elapsed_usec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: elapsed_usec not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.dispatched_usec_)
n += self.lengthVarInt64(self.lag_usec_)
n += self.lengthVarInt64(self.elapsed_usec_)
if (self.has_response_code_): n += 2 + self.lengthVarInt64(self.response_code_)
if (self.has_retry_reason_): n += 2 + self.lengthString(len(self.retry_reason_))
return n + 6
def ByteSizePartial(self):
n = 0
if (self.has_dispatched_usec_):
n += 2
n += self.lengthVarInt64(self.dispatched_usec_)
if (self.has_lag_usec_):
n += 2
n += self.lengthVarInt64(self.lag_usec_)
if (self.has_elapsed_usec_):
n += 2
n += self.lengthVarInt64(self.elapsed_usec_)
if (self.has_response_code_): n += 2 + self.lengthVarInt64(self.response_code_)
if (self.has_retry_reason_): n += 2 + self.lengthString(len(self.retry_reason_))
return n
def Clear(self):
self.clear_dispatched_usec()
self.clear_lag_usec()
self.clear_elapsed_usec()
self.clear_response_code()
self.clear_retry_reason()
def OutputUnchecked(self, out):
out.putVarInt32(136)
out.putVarInt64(self.dispatched_usec_)
out.putVarInt32(144)
out.putVarInt64(self.lag_usec_)
out.putVarInt32(152)
out.putVarInt64(self.elapsed_usec_)
if (self.has_response_code_):
out.putVarInt32(160)
out.putVarInt64(self.response_code_)
if (self.has_retry_reason_):
out.putVarInt32(218)
out.putPrefixedString(self.retry_reason_)
def OutputPartial(self, out):
if (self.has_dispatched_usec_):
out.putVarInt32(136)
out.putVarInt64(self.dispatched_usec_)
if (self.has_lag_usec_):
out.putVarInt32(144)
out.putVarInt64(self.lag_usec_)
if (self.has_elapsed_usec_):
out.putVarInt32(152)
out.putVarInt64(self.elapsed_usec_)
if (self.has_response_code_):
out.putVarInt32(160)
out.putVarInt64(self.response_code_)
if (self.has_retry_reason_):
out.putVarInt32(218)
out.putPrefixedString(self.retry_reason_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 132: break
if tt == 136:
self.set_dispatched_usec(d.getVarInt64())
continue
if tt == 144:
self.set_lag_usec(d.getVarInt64())
continue
if tt == 152:
self.set_elapsed_usec(d.getVarInt64())
continue
if tt == 160:
self.set_response_code(d.getVarInt64())
continue
if tt == 218:
self.set_retry_reason(d.getPrefixedString())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_dispatched_usec_: res+=prefix+("dispatched_usec: %s\n" % self.DebugFormatInt64(self.dispatched_usec_))
if self.has_lag_usec_: res+=prefix+("lag_usec: %s\n" % self.DebugFormatInt64(self.lag_usec_))
if self.has_elapsed_usec_: res+=prefix+("elapsed_usec: %s\n" % self.DebugFormatInt64(self.elapsed_usec_))
if self.has_response_code_: res+=prefix+("response_code: %s\n" % self.DebugFormatInt64(self.response_code_))
if self.has_retry_reason_: res+=prefix+("retry_reason: %s\n" % self.DebugFormatString(self.retry_reason_))
return res
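# Illustrative sketch: summarize one TaskRunLog entry.  Judging by the field
# names, lag_usec measures the delay before dispatch and elapsed_usec the
# handler run time, so their sum approximates end-to-end latency.
def _example_run_log_latency_usec(runlog):
  total = runlog.lag_usec() + runlog.elapsed_usec()
  code = runlog.response_code() if runlog.has_response_code() else None
  return total, code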
class TaskQueueQueryTasksResponse_Task(ProtocolBuffer.ProtocolMessage):
# RequestMethod values
GET = 1
POST = 2
HEAD = 3
PUT = 4
DELETE = 5
_RequestMethod_NAMES = {
1: "GET",
2: "POST",
3: "HEAD",
4: "PUT",
5: "DELETE",
}
def RequestMethod_Name(cls, x): return cls._RequestMethod_NAMES.get(x, "")
RequestMethod_Name = classmethod(RequestMethod_Name)
has_task_name_ = 0
task_name_ = ""
has_eta_usec_ = 0
eta_usec_ = 0
has_url_ = 0
url_ = ""
has_method_ = 0
method_ = 0
has_retry_count_ = 0
retry_count_ = 0
has_body_size_ = 0
body_size_ = 0
has_body_ = 0
body_ = ""
has_creation_time_usec_ = 0
creation_time_usec_ = 0
has_crontimetable_ = 0
crontimetable_ = None
has_runlog_ = 0
runlog_ = None
has_description_ = 0
description_ = ""
has_payload_ = 0
payload_ = None
has_retry_parameters_ = 0
retry_parameters_ = None
has_first_try_usec_ = 0
first_try_usec_ = 0
has_tag_ = 0
tag_ = ""
has_execution_count_ = 0
execution_count_ = 0
def __init__(self, contents=None):
self.header_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def task_name(self): return self.task_name_
def set_task_name(self, x):
self.has_task_name_ = 1
self.task_name_ = x
def clear_task_name(self):
if self.has_task_name_:
self.has_task_name_ = 0
self.task_name_ = ""
def has_task_name(self): return self.has_task_name_
def eta_usec(self): return self.eta_usec_
def set_eta_usec(self, x):
self.has_eta_usec_ = 1
self.eta_usec_ = x
def clear_eta_usec(self):
if self.has_eta_usec_:
self.has_eta_usec_ = 0
self.eta_usec_ = 0
def has_eta_usec(self): return self.has_eta_usec_
def url(self): return self.url_
def set_url(self, x):
self.has_url_ = 1
self.url_ = x
def clear_url(self):
if self.has_url_:
self.has_url_ = 0
self.url_ = ""
def has_url(self): return self.has_url_
def method(self): return self.method_
def set_method(self, x):
self.has_method_ = 1
self.method_ = x
def clear_method(self):
if self.has_method_:
self.has_method_ = 0
self.method_ = 0
def has_method(self): return self.has_method_
def retry_count(self): return self.retry_count_
def set_retry_count(self, x):
self.has_retry_count_ = 1
self.retry_count_ = x
def clear_retry_count(self):
if self.has_retry_count_:
self.has_retry_count_ = 0
self.retry_count_ = 0
def has_retry_count(self): return self.has_retry_count_
def header_size(self): return len(self.header_)
def header_list(self): return self.header_
def header(self, i):
return self.header_[i]
def mutable_header(self, i):
return self.header_[i]
def add_header(self):
x = TaskQueueQueryTasksResponse_TaskHeader()
self.header_.append(x)
return x
def clear_header(self):
self.header_ = []
def body_size(self): return self.body_size_
def set_body_size(self, x):
self.has_body_size_ = 1
self.body_size_ = x
def clear_body_size(self):
if self.has_body_size_:
self.has_body_size_ = 0
self.body_size_ = 0
def has_body_size(self): return self.has_body_size_
def body(self): return self.body_
def set_body(self, x):
self.has_body_ = 1
self.body_ = x
def clear_body(self):
if self.has_body_:
self.has_body_ = 0
self.body_ = ""
def has_body(self): return self.has_body_
def creation_time_usec(self): return self.creation_time_usec_
def set_creation_time_usec(self, x):
self.has_creation_time_usec_ = 1
self.creation_time_usec_ = x
def clear_creation_time_usec(self):
if self.has_creation_time_usec_:
self.has_creation_time_usec_ = 0
self.creation_time_usec_ = 0
def has_creation_time_usec(self): return self.has_creation_time_usec_
def crontimetable(self):
if self.crontimetable_ is None:
self.lazy_init_lock_.acquire()
try:
if self.crontimetable_ is None: self.crontimetable_ = TaskQueueQueryTasksResponse_TaskCronTimetable()
finally:
self.lazy_init_lock_.release()
return self.crontimetable_
def mutable_crontimetable(self): self.has_crontimetable_ = 1; return self.crontimetable()
def clear_crontimetable(self):
# Warning: this method does not acquire the lock.
if self.has_crontimetable_:
      self.has_crontimetable_ = 0
if self.crontimetable_ is not None: self.crontimetable_.Clear()
def has_crontimetable(self): return self.has_crontimetable_
def runlog(self):
if self.runlog_ is None:
self.lazy_init_lock_.acquire()
try:
if self.runlog_ is None: self.runlog_ = TaskQueueQueryTasksResponse_TaskRunLog()
finally:
self.lazy_init_lock_.release()
return self.runlog_
def mutable_runlog(self): self.has_runlog_ = 1; return self.runlog()
def clear_runlog(self):
# Warning: this method does not acquire the lock.
if self.has_runlog_:
      self.has_runlog_ = 0
if self.runlog_ is not None: self.runlog_.Clear()
def has_runlog(self): return self.has_runlog_
def description(self): return self.description_
def set_description(self, x):
self.has_description_ = 1
self.description_ = x
def clear_description(self):
if self.has_description_:
self.has_description_ = 0
self.description_ = ""
def has_description(self): return self.has_description_
def payload(self):
if self.payload_ is None:
self.lazy_init_lock_.acquire()
try:
if self.payload_ is None: self.payload_ = MessageSet()
finally:
self.lazy_init_lock_.release()
return self.payload_
def mutable_payload(self): self.has_payload_ = 1; return self.payload()
def clear_payload(self):
# Warning: this method does not acquire the lock.
if self.has_payload_:
      self.has_payload_ = 0
if self.payload_ is not None: self.payload_.Clear()
def has_payload(self): return self.has_payload_
def retry_parameters(self):
if self.retry_parameters_ is None:
self.lazy_init_lock_.acquire()
try:
if self.retry_parameters_ is None: self.retry_parameters_ = TaskQueueRetryParameters()
finally:
self.lazy_init_lock_.release()
return self.retry_parameters_
def mutable_retry_parameters(self): self.has_retry_parameters_ = 1; return self.retry_parameters()
def clear_retry_parameters(self):
# Warning: this method does not acquire the lock.
if self.has_retry_parameters_:
      self.has_retry_parameters_ = 0
if self.retry_parameters_ is not None: self.retry_parameters_.Clear()
def has_retry_parameters(self): return self.has_retry_parameters_
def first_try_usec(self): return self.first_try_usec_
def set_first_try_usec(self, x):
self.has_first_try_usec_ = 1
self.first_try_usec_ = x
def clear_first_try_usec(self):
if self.has_first_try_usec_:
self.has_first_try_usec_ = 0
self.first_try_usec_ = 0
def has_first_try_usec(self): return self.has_first_try_usec_
def tag(self): return self.tag_
def set_tag(self, x):
self.has_tag_ = 1
self.tag_ = x
def clear_tag(self):
if self.has_tag_:
self.has_tag_ = 0
self.tag_ = ""
def has_tag(self): return self.has_tag_
def execution_count(self): return self.execution_count_
def set_execution_count(self, x):
self.has_execution_count_ = 1
self.execution_count_ = x
def clear_execution_count(self):
if self.has_execution_count_:
self.has_execution_count_ = 0
self.execution_count_ = 0
def has_execution_count(self): return self.has_execution_count_
def MergeFrom(self, x):
assert x is not self
if (x.has_task_name()): self.set_task_name(x.task_name())
if (x.has_eta_usec()): self.set_eta_usec(x.eta_usec())
if (x.has_url()): self.set_url(x.url())
if (x.has_method()): self.set_method(x.method())
if (x.has_retry_count()): self.set_retry_count(x.retry_count())
for i in xrange(x.header_size()): self.add_header().CopyFrom(x.header(i))
if (x.has_body_size()): self.set_body_size(x.body_size())
if (x.has_body()): self.set_body(x.body())
if (x.has_creation_time_usec()): self.set_creation_time_usec(x.creation_time_usec())
if (x.has_crontimetable()): self.mutable_crontimetable().MergeFrom(x.crontimetable())
if (x.has_runlog()): self.mutable_runlog().MergeFrom(x.runlog())
if (x.has_description()): self.set_description(x.description())
if (x.has_payload()): self.mutable_payload().MergeFrom(x.payload())
if (x.has_retry_parameters()): self.mutable_retry_parameters().MergeFrom(x.retry_parameters())
if (x.has_first_try_usec()): self.set_first_try_usec(x.first_try_usec())
if (x.has_tag()): self.set_tag(x.tag())
if (x.has_execution_count()): self.set_execution_count(x.execution_count())
def Equals(self, x):
if x is self: return 1
if self.has_task_name_ != x.has_task_name_: return 0
if self.has_task_name_ and self.task_name_ != x.task_name_: return 0
if self.has_eta_usec_ != x.has_eta_usec_: return 0
if self.has_eta_usec_ and self.eta_usec_ != x.eta_usec_: return 0
if self.has_url_ != x.has_url_: return 0
if self.has_url_ and self.url_ != x.url_: return 0
if self.has_method_ != x.has_method_: return 0
if self.has_method_ and self.method_ != x.method_: return 0
if self.has_retry_count_ != x.has_retry_count_: return 0
if self.has_retry_count_ and self.retry_count_ != x.retry_count_: return 0
if len(self.header_) != len(x.header_): return 0
for e1, e2 in zip(self.header_, x.header_):
if e1 != e2: return 0
if self.has_body_size_ != x.has_body_size_: return 0
if self.has_body_size_ and self.body_size_ != x.body_size_: return 0
if self.has_body_ != x.has_body_: return 0
if self.has_body_ and self.body_ != x.body_: return 0
if self.has_creation_time_usec_ != x.has_creation_time_usec_: return 0
if self.has_creation_time_usec_ and self.creation_time_usec_ != x.creation_time_usec_: return 0
if self.has_crontimetable_ != x.has_crontimetable_: return 0
if self.has_crontimetable_ and self.crontimetable_ != x.crontimetable_: return 0
if self.has_runlog_ != x.has_runlog_: return 0
if self.has_runlog_ and self.runlog_ != x.runlog_: return 0
if self.has_description_ != x.has_description_: return 0
if self.has_description_ and self.description_ != x.description_: return 0
if self.has_payload_ != x.has_payload_: return 0
if self.has_payload_ and self.payload_ != x.payload_: return 0
if self.has_retry_parameters_ != x.has_retry_parameters_: return 0
if self.has_retry_parameters_ and self.retry_parameters_ != x.retry_parameters_: return 0
if self.has_first_try_usec_ != x.has_first_try_usec_: return 0
if self.has_first_try_usec_ and self.first_try_usec_ != x.first_try_usec_: return 0
if self.has_tag_ != x.has_tag_: return 0
if self.has_tag_ and self.tag_ != x.tag_: return 0
if self.has_execution_count_ != x.has_execution_count_: return 0
if self.has_execution_count_ and self.execution_count_ != x.execution_count_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_task_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: task_name not set.')
if (not self.has_eta_usec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: eta_usec not set.')
for p in self.header_:
if not p.IsInitialized(debug_strs): initialized=0
if (not self.has_creation_time_usec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: creation_time_usec not set.')
if (self.has_crontimetable_ and not self.crontimetable_.IsInitialized(debug_strs)): initialized = 0
if (self.has_runlog_ and not self.runlog_.IsInitialized(debug_strs)): initialized = 0
if (self.has_payload_ and not self.payload_.IsInitialized(debug_strs)): initialized = 0
if (self.has_retry_parameters_ and not self.retry_parameters_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.task_name_))
n += self.lengthVarInt64(self.eta_usec_)
if (self.has_url_): n += 1 + self.lengthString(len(self.url_))
if (self.has_method_): n += 1 + self.lengthVarInt64(self.method_)
if (self.has_retry_count_): n += 1 + self.lengthVarInt64(self.retry_count_)
n += 2 * len(self.header_)
for i in xrange(len(self.header_)): n += self.header_[i].ByteSize()
if (self.has_body_size_): n += 1 + self.lengthVarInt64(self.body_size_)
if (self.has_body_): n += 1 + self.lengthString(len(self.body_))
n += self.lengthVarInt64(self.creation_time_usec_)
if (self.has_crontimetable_): n += 2 + self.crontimetable_.ByteSize()
if (self.has_runlog_): n += 4 + self.runlog_.ByteSize()
if (self.has_description_): n += 2 + self.lengthString(len(self.description_))
if (self.has_payload_): n += 2 + self.lengthString(self.payload_.ByteSize())
if (self.has_retry_parameters_): n += 2 + self.lengthString(self.retry_parameters_.ByteSize())
if (self.has_first_try_usec_): n += 2 + self.lengthVarInt64(self.first_try_usec_)
if (self.has_tag_): n += 2 + self.lengthString(len(self.tag_))
if (self.has_execution_count_): n += 2 + self.lengthVarInt64(self.execution_count_)
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_task_name_):
n += 1
n += self.lengthString(len(self.task_name_))
if (self.has_eta_usec_):
n += 1
n += self.lengthVarInt64(self.eta_usec_)
if (self.has_url_): n += 1 + self.lengthString(len(self.url_))
if (self.has_method_): n += 1 + self.lengthVarInt64(self.method_)
if (self.has_retry_count_): n += 1 + self.lengthVarInt64(self.retry_count_)
n += 2 * len(self.header_)
for i in xrange(len(self.header_)): n += self.header_[i].ByteSizePartial()
if (self.has_body_size_): n += 1 + self.lengthVarInt64(self.body_size_)
if (self.has_body_): n += 1 + self.lengthString(len(self.body_))
if (self.has_creation_time_usec_):
n += 1
n += self.lengthVarInt64(self.creation_time_usec_)
if (self.has_crontimetable_): n += 2 + self.crontimetable_.ByteSizePartial()
if (self.has_runlog_): n += 4 + self.runlog_.ByteSizePartial()
if (self.has_description_): n += 2 + self.lengthString(len(self.description_))
if (self.has_payload_): n += 2 + self.lengthString(self.payload_.ByteSizePartial())
if (self.has_retry_parameters_): n += 2 + self.lengthString(self.retry_parameters_.ByteSizePartial())
if (self.has_first_try_usec_): n += 2 + self.lengthVarInt64(self.first_try_usec_)
if (self.has_tag_): n += 2 + self.lengthString(len(self.tag_))
if (self.has_execution_count_): n += 2 + self.lengthVarInt64(self.execution_count_)
return n
def Clear(self):
self.clear_task_name()
self.clear_eta_usec()
self.clear_url()
self.clear_method()
self.clear_retry_count()
self.clear_header()
self.clear_body_size()
self.clear_body()
self.clear_creation_time_usec()
self.clear_crontimetable()
self.clear_runlog()
self.clear_description()
self.clear_payload()
self.clear_retry_parameters()
self.clear_first_try_usec()
self.clear_tag()
self.clear_execution_count()
def OutputUnchecked(self, out):
out.putVarInt32(18)
out.putPrefixedString(self.task_name_)
out.putVarInt32(24)
out.putVarInt64(self.eta_usec_)
if (self.has_url_):
out.putVarInt32(34)
out.putPrefixedString(self.url_)
if (self.has_method_):
out.putVarInt32(40)
out.putVarInt32(self.method_)
if (self.has_retry_count_):
out.putVarInt32(48)
out.putVarInt32(self.retry_count_)
for i in xrange(len(self.header_)):
out.putVarInt32(59)
self.header_[i].OutputUnchecked(out)
out.putVarInt32(60)
if (self.has_body_size_):
out.putVarInt32(80)
out.putVarInt32(self.body_size_)
if (self.has_body_):
out.putVarInt32(90)
out.putPrefixedString(self.body_)
out.putVarInt32(96)
out.putVarInt64(self.creation_time_usec_)
if (self.has_crontimetable_):
out.putVarInt32(107)
self.crontimetable_.OutputUnchecked(out)
out.putVarInt32(108)
if (self.has_runlog_):
out.putVarInt32(131)
self.runlog_.OutputUnchecked(out)
out.putVarInt32(132)
if (self.has_description_):
out.putVarInt32(170)
out.putPrefixedString(self.description_)
if (self.has_payload_):
out.putVarInt32(178)
out.putVarInt32(self.payload_.ByteSize())
self.payload_.OutputUnchecked(out)
if (self.has_retry_parameters_):
out.putVarInt32(186)
out.putVarInt32(self.retry_parameters_.ByteSize())
self.retry_parameters_.OutputUnchecked(out)
if (self.has_first_try_usec_):
out.putVarInt32(192)
out.putVarInt64(self.first_try_usec_)
if (self.has_tag_):
out.putVarInt32(202)
out.putPrefixedString(self.tag_)
if (self.has_execution_count_):
out.putVarInt32(208)
out.putVarInt32(self.execution_count_)
def OutputPartial(self, out):
if (self.has_task_name_):
out.putVarInt32(18)
out.putPrefixedString(self.task_name_)
if (self.has_eta_usec_):
out.putVarInt32(24)
out.putVarInt64(self.eta_usec_)
if (self.has_url_):
out.putVarInt32(34)
out.putPrefixedString(self.url_)
if (self.has_method_):
out.putVarInt32(40)
out.putVarInt32(self.method_)
if (self.has_retry_count_):
out.putVarInt32(48)
out.putVarInt32(self.retry_count_)
for i in xrange(len(self.header_)):
out.putVarInt32(59)
self.header_[i].OutputPartial(out)
out.putVarInt32(60)
if (self.has_body_size_):
out.putVarInt32(80)
out.putVarInt32(self.body_size_)
if (self.has_body_):
out.putVarInt32(90)
out.putPrefixedString(self.body_)
if (self.has_creation_time_usec_):
out.putVarInt32(96)
out.putVarInt64(self.creation_time_usec_)
if (self.has_crontimetable_):
out.putVarInt32(107)
self.crontimetable_.OutputPartial(out)
out.putVarInt32(108)
if (self.has_runlog_):
out.putVarInt32(131)
self.runlog_.OutputPartial(out)
out.putVarInt32(132)
if (self.has_description_):
out.putVarInt32(170)
out.putPrefixedString(self.description_)
if (self.has_payload_):
out.putVarInt32(178)
out.putVarInt32(self.payload_.ByteSizePartial())
self.payload_.OutputPartial(out)
if (self.has_retry_parameters_):
out.putVarInt32(186)
out.putVarInt32(self.retry_parameters_.ByteSizePartial())
self.retry_parameters_.OutputPartial(out)
if (self.has_first_try_usec_):
out.putVarInt32(192)
out.putVarInt64(self.first_try_usec_)
if (self.has_tag_):
out.putVarInt32(202)
out.putPrefixedString(self.tag_)
if (self.has_execution_count_):
out.putVarInt32(208)
out.putVarInt32(self.execution_count_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 12: break
if tt == 18:
self.set_task_name(d.getPrefixedString())
continue
if tt == 24:
self.set_eta_usec(d.getVarInt64())
continue
if tt == 34:
self.set_url(d.getPrefixedString())
continue
if tt == 40:
self.set_method(d.getVarInt32())
continue
if tt == 48:
self.set_retry_count(d.getVarInt32())
continue
if tt == 59:
self.add_header().TryMerge(d)
continue
if tt == 80:
self.set_body_size(d.getVarInt32())
continue
if tt == 90:
self.set_body(d.getPrefixedString())
continue
if tt == 96:
self.set_creation_time_usec(d.getVarInt64())
continue
if tt == 107:
self.mutable_crontimetable().TryMerge(d)
continue
if tt == 131:
self.mutable_runlog().TryMerge(d)
continue
if tt == 170:
self.set_description(d.getPrefixedString())
continue
if tt == 178:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_payload().TryMerge(tmp)
continue
if tt == 186:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_retry_parameters().TryMerge(tmp)
continue
if tt == 192:
self.set_first_try_usec(d.getVarInt64())
continue
if tt == 202:
self.set_tag(d.getPrefixedString())
continue
if tt == 208:
self.set_execution_count(d.getVarInt32())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_task_name_: res+=prefix+("task_name: %s\n" % self.DebugFormatString(self.task_name_))
if self.has_eta_usec_: res+=prefix+("eta_usec: %s\n" % self.DebugFormatInt64(self.eta_usec_))
if self.has_url_: res+=prefix+("url: %s\n" % self.DebugFormatString(self.url_))
if self.has_method_: res+=prefix+("method: %s\n" % self.DebugFormatInt32(self.method_))
if self.has_retry_count_: res+=prefix+("retry_count: %s\n" % self.DebugFormatInt32(self.retry_count_))
cnt=0
for e in self.header_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Header%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
if self.has_body_size_: res+=prefix+("body_size: %s\n" % self.DebugFormatInt32(self.body_size_))
if self.has_body_: res+=prefix+("body: %s\n" % self.DebugFormatString(self.body_))
if self.has_creation_time_usec_: res+=prefix+("creation_time_usec: %s\n" % self.DebugFormatInt64(self.creation_time_usec_))
if self.has_crontimetable_:
res+=prefix+"CronTimetable {\n"
res+=self.crontimetable_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
if self.has_runlog_:
res+=prefix+"RunLog {\n"
res+=self.runlog_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
if self.has_description_: res+=prefix+("description: %s\n" % self.DebugFormatString(self.description_))
if self.has_payload_:
res+=prefix+"payload <\n"
res+=self.payload_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_retry_parameters_:
res+=prefix+"retry_parameters <\n"
res+=self.retry_parameters_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_first_try_usec_: res+=prefix+("first_try_usec: %s\n" % self.DebugFormatInt64(self.first_try_usec_))
if self.has_tag_: res+=prefix+("tag: %s\n" % self.DebugFormatString(self.tag_))
if self.has_execution_count_: res+=prefix+("execution_count: %s\n" % self.DebugFormatInt32(self.execution_count_))
return res
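# Editor's note (illustrative, not generated code): every literal passed to
# putVarInt32() in the Output*/TryMerge methods above is a protobuf wire tag,
# computed as (field_number << 3) | wire_type.  A minimal, runnable sketch:
def _example_wire_tag(field_number, wire_type):
  """Reproduces the tag constants used by the Task message above.

  _example_wire_tag(2, 2) == 18   # task_name: field 2, length-delimited
  _example_wire_tag(3, 0) == 24   # eta_usec: field 3, varint
  _example_wire_tag(7, 3) == 59   # Header: field 7, start-group
  _example_wire_tag(7, 4) == 60   # Header: field 7, end-group
  """
  return (field_number << 3) | wire_type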
class TaskQueueQueryTasksResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.task_ = []
if contents is not None: self.MergeFromString(contents)
def task_size(self): return len(self.task_)
def task_list(self): return self.task_
def task(self, i):
return self.task_[i]
def mutable_task(self, i):
return self.task_[i]
def add_task(self):
x = TaskQueueQueryTasksResponse_Task()
self.task_.append(x)
return x
def clear_task(self):
self.task_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.task_size()): self.add_task().CopyFrom(x.task(i))
def Equals(self, x):
if x is self: return 1
if len(self.task_) != len(x.task_): return 0
for e1, e2 in zip(self.task_, x.task_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.task_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 2 * len(self.task_)
for i in xrange(len(self.task_)): n += self.task_[i].ByteSize()
return n
def ByteSizePartial(self):
n = 0
n += 2 * len(self.task_)
for i in xrange(len(self.task_)): n += self.task_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_task()
def OutputUnchecked(self, out):
for i in xrange(len(self.task_)):
out.putVarInt32(11)
self.task_[i].OutputUnchecked(out)
out.putVarInt32(12)
def OutputPartial(self, out):
for i in xrange(len(self.task_)):
out.putVarInt32(11)
self.task_[i].OutputPartial(out)
out.putVarInt32(12)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 11:
self.add_task().TryMerge(d)
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.task_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Task%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kTaskGroup = 1
kTasktask_name = 2
kTasketa_usec = 3
kTaskurl = 4
kTaskmethod = 5
kTaskretry_count = 6
kTaskHeaderGroup = 7
kTaskHeaderkey = 8
kTaskHeadervalue = 9
kTaskbody_size = 10
kTaskbody = 11
kTaskcreation_time_usec = 12
kTaskCronTimetableGroup = 13
kTaskCronTimetableschedule = 14
kTaskCronTimetabletimezone = 15
kTaskRunLogGroup = 16
kTaskRunLogdispatched_usec = 17
kTaskRunLoglag_usec = 18
kTaskRunLogelapsed_usec = 19
kTaskRunLogresponse_code = 20
kTaskRunLogretry_reason = 27
kTaskdescription = 21
kTaskpayload = 22
kTaskretry_parameters = 23
kTaskfirst_try_usec = 24
kTasktag = 25
kTaskexecution_count = 26
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "Task",
2: "task_name",
3: "eta_usec",
4: "url",
5: "method",
6: "retry_count",
7: "Header",
8: "key",
9: "value",
10: "body_size",
11: "body",
12: "creation_time_usec",
13: "CronTimetable",
14: "schedule",
15: "timezone",
16: "RunLog",
17: "dispatched_usec",
18: "lag_usec",
19: "elapsed_usec",
20: "response_code",
21: "description",
22: "payload",
23: "retry_parameters",
24: "first_try_usec",
25: "tag",
26: "execution_count",
27: "retry_reason",
}, 27)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STARTGROUP,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.STARTGROUP,
8: ProtocolBuffer.Encoder.STRING,
9: ProtocolBuffer.Encoder.STRING,
10: ProtocolBuffer.Encoder.NUMERIC,
11: ProtocolBuffer.Encoder.STRING,
12: ProtocolBuffer.Encoder.NUMERIC,
13: ProtocolBuffer.Encoder.STARTGROUP,
14: ProtocolBuffer.Encoder.STRING,
15: ProtocolBuffer.Encoder.STRING,
16: ProtocolBuffer.Encoder.STARTGROUP,
17: ProtocolBuffer.Encoder.NUMERIC,
18: ProtocolBuffer.Encoder.NUMERIC,
19: ProtocolBuffer.Encoder.NUMERIC,
20: ProtocolBuffer.Encoder.NUMERIC,
21: ProtocolBuffer.Encoder.STRING,
22: ProtocolBuffer.Encoder.STRING,
23: ProtocolBuffer.Encoder.STRING,
24: ProtocolBuffer.Encoder.NUMERIC,
25: ProtocolBuffer.Encoder.STRING,
26: ProtocolBuffer.Encoder.NUMERIC,
27: ProtocolBuffer.Encoder.STRING,
}, 27, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueQueryTasksResponse'
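# Editor's sketch (not part of the generated module): how the repeated-group
# accessors defined above are typically used.  Field values are made up.
def _example_build_query_tasks_response():
  resp = TaskQueueQueryTasksResponse()
  task = resp.add_task()              # appends a fresh Task and returns it
  task.set_task_name('task-0001')     # required
  task.set_eta_usec(0)                # required
  task.set_creation_time_usec(0)      # required
  return [resp.task(i).task_name() for i in xrange(resp.task_size())]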
class TaskQueueFetchTaskRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_queue_name_ = 0
queue_name_ = ""
has_task_name_ = 0
task_name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def queue_name(self): return self.queue_name_
def set_queue_name(self, x):
self.has_queue_name_ = 1
self.queue_name_ = x
def clear_queue_name(self):
if self.has_queue_name_:
self.has_queue_name_ = 0
self.queue_name_ = ""
def has_queue_name(self): return self.has_queue_name_
def task_name(self): return self.task_name_
def set_task_name(self, x):
self.has_task_name_ = 1
self.task_name_ = x
def clear_task_name(self):
if self.has_task_name_:
self.has_task_name_ = 0
self.task_name_ = ""
def has_task_name(self): return self.has_task_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_queue_name()): self.set_queue_name(x.queue_name())
if (x.has_task_name()): self.set_task_name(x.task_name())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_queue_name_ != x.has_queue_name_: return 0
if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
if self.has_task_name_ != x.has_task_name_: return 0
if self.has_task_name_ and self.task_name_ != x.task_name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_queue_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: queue_name not set.')
if (not self.has_task_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: task_name not set.')
return initialized
def ByteSize(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
n += self.lengthString(len(self.queue_name_))
n += self.lengthString(len(self.task_name_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
if (self.has_queue_name_):
n += 1
n += self.lengthString(len(self.queue_name_))
if (self.has_task_name_):
n += 1
n += self.lengthString(len(self.task_name_))
return n
def Clear(self):
self.clear_app_id()
self.clear_queue_name()
self.clear_task_name()
def OutputUnchecked(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
out.putVarInt32(26)
out.putPrefixedString(self.task_name_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
if (self.has_queue_name_):
out.putVarInt32(18)
out.putPrefixedString(self.queue_name_)
if (self.has_task_name_):
out.putVarInt32(26)
out.putPrefixedString(self.task_name_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 18:
self.set_queue_name(d.getPrefixedString())
continue
if tt == 26:
self.set_task_name(d.getPrefixedString())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
if self.has_task_name_: res+=prefix+("task_name: %s\n" % self.DebugFormatString(self.task_name_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kqueue_name = 2
ktask_name = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "queue_name",
3: "task_name",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueFetchTaskRequest'
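# Editor's sketch (not part of the generated module): IsInitialized() reports
# missing required fields through the optional debug_strs argument.
def _example_check_required_fields():
  req = TaskQueueFetchTaskRequest()
  req.set_queue_name('default')
  errors = []
  ok = req.IsInitialized(errors)      # task_name is still unset
  # ok == 0 and errors == ['Required field: task_name not set.']
  return ok, errors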
class TaskQueueFetchTaskResponse(ProtocolBuffer.ProtocolMessage):
has_task_ = 0
def __init__(self, contents=None):
self.task_ = TaskQueueQueryTasksResponse()
if contents is not None: self.MergeFromString(contents)
def task(self): return self.task_
def mutable_task(self): self.has_task_ = 1; return self.task_
def clear_task(self):self.has_task_ = 0; self.task_.Clear()
def has_task(self): return self.has_task_
def MergeFrom(self, x):
assert x is not self
if (x.has_task()): self.mutable_task().MergeFrom(x.task())
def Equals(self, x):
if x is self: return 1
if self.has_task_ != x.has_task_: return 0
if self.has_task_ and self.task_ != x.task_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_task_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: task not set.')
elif not self.task_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.task_.ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_task_):
n += 1
n += self.lengthString(self.task_.ByteSizePartial())
return n
def Clear(self):
self.clear_task()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.task_.ByteSize())
self.task_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_task_):
out.putVarInt32(10)
out.putVarInt32(self.task_.ByteSizePartial())
self.task_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_task().TryMerge(tmp)
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_task_:
res+=prefix+"task <\n"
res+=self.task_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktask = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "task",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueFetchTaskResponse'
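# Editor's sketch (not part of the generated module): a serialize/parse round
# trip.  Encode() and the contents-parsing __init__ come from the
# ProtocolBuffer.ProtocolMessage base class (assumed available, as used
# elsewhere in this SDK).
def _example_round_trip():
  req = TaskQueueFetchTaskRequest()
  req.set_queue_name('default')
  req.set_task_name('task-0001')
  wire = req.Encode()                       # fails if required fields are unset
  parsed = TaskQueueFetchTaskRequest(wire)  # __init__ calls MergeFromString
  return parsed.Equals(req)                 # 1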
class TaskQueueUpdateStorageLimitRequest(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_limit_ = 0
limit_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def limit(self): return self.limit_
def set_limit(self, x):
self.has_limit_ = 1
self.limit_ = x
def clear_limit(self):
if self.has_limit_:
self.has_limit_ = 0
self.limit_ = 0
def has_limit(self): return self.has_limit_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_limit()): self.set_limit(x.limit())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_limit_ != x.has_limit_: return 0
if self.has_limit_ and self.limit_ != x.limit_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app_id not set.')
if (not self.has_limit_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: limit not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_id_))
n += self.lengthVarInt64(self.limit_)
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_app_id_):
n += 1
n += self.lengthString(len(self.app_id_))
if (self.has_limit_):
n += 1
n += self.lengthVarInt64(self.limit_)
return n
def Clear(self):
self.clear_app_id()
self.clear_limit()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
out.putVarInt32(16)
out.putVarInt64(self.limit_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
if (self.has_limit_):
out.putVarInt32(16)
out.putVarInt64(self.limit_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 16:
self.set_limit(d.getVarInt64())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt64(self.limit_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
klimit = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "limit",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueUpdateStorageLimitRequest'
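# Editor's note (illustrative, not generated code): the "+ 2" in ByteSize()
# above pays for the two one-byte tags (10 and 16); lengthVarInt64() then
# sizes the payload.  A varint carries 7 payload bits per byte:
def _example_varint_length(value):
  """Byte length of a non-negative value encoded as a varint."""
  if value == 0:
    return 1
  n = 0
  while value:
    n += 1
    value >>= 7
  return n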
class TaskQueueUpdateStorageLimitResponse(ProtocolBuffer.ProtocolMessage):
has_new_limit_ = 0
new_limit_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def new_limit(self): return self.new_limit_
def set_new_limit(self, x):
self.has_new_limit_ = 1
self.new_limit_ = x
def clear_new_limit(self):
if self.has_new_limit_:
self.has_new_limit_ = 0
self.new_limit_ = 0
def has_new_limit(self): return self.has_new_limit_
def MergeFrom(self, x):
assert x is not self
if (x.has_new_limit()): self.set_new_limit(x.new_limit())
def Equals(self, x):
if x is self: return 1
if self.has_new_limit_ != x.has_new_limit_: return 0
if self.has_new_limit_ and self.new_limit_ != x.new_limit_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_new_limit_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: new_limit not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.new_limit_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_new_limit_):
n += 1
n += self.lengthVarInt64(self.new_limit_)
return n
def Clear(self):
self.clear_new_limit()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.new_limit_)
def OutputPartial(self, out):
if (self.has_new_limit_):
out.putVarInt32(8)
out.putVarInt64(self.new_limit_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_new_limit(d.getVarInt64())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_new_limit_: res+=prefix+("new_limit: %s\n" % self.DebugFormatInt64(self.new_limit_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
knew_limit = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "new_limit",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueUpdateStorageLimitResponse'
class TaskQueueQueryAndOwnTasksRequest(ProtocolBuffer.ProtocolMessage):
has_queue_name_ = 0
queue_name_ = ""
has_lease_seconds_ = 0
lease_seconds_ = 0.0
has_max_tasks_ = 0
max_tasks_ = 0
has_group_by_tag_ = 0
group_by_tag_ = 0
has_tag_ = 0
tag_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def queue_name(self): return self.queue_name_
def set_queue_name(self, x):
self.has_queue_name_ = 1
self.queue_name_ = x
def clear_queue_name(self):
if self.has_queue_name_:
self.has_queue_name_ = 0
self.queue_name_ = ""
def has_queue_name(self): return self.has_queue_name_
def lease_seconds(self): return self.lease_seconds_
def set_lease_seconds(self, x):
self.has_lease_seconds_ = 1
self.lease_seconds_ = x
def clear_lease_seconds(self):
if self.has_lease_seconds_:
self.has_lease_seconds_ = 0
self.lease_seconds_ = 0.0
def has_lease_seconds(self): return self.has_lease_seconds_
def max_tasks(self): return self.max_tasks_
def set_max_tasks(self, x):
self.has_max_tasks_ = 1
self.max_tasks_ = x
def clear_max_tasks(self):
if self.has_max_tasks_:
self.has_max_tasks_ = 0
self.max_tasks_ = 0
def has_max_tasks(self): return self.has_max_tasks_
def group_by_tag(self): return self.group_by_tag_
def set_group_by_tag(self, x):
self.has_group_by_tag_ = 1
self.group_by_tag_ = x
def clear_group_by_tag(self):
if self.has_group_by_tag_:
self.has_group_by_tag_ = 0
self.group_by_tag_ = 0
def has_group_by_tag(self): return self.has_group_by_tag_
def tag(self): return self.tag_
def set_tag(self, x):
self.has_tag_ = 1
self.tag_ = x
def clear_tag(self):
if self.has_tag_:
self.has_tag_ = 0
self.tag_ = ""
def has_tag(self): return self.has_tag_
def MergeFrom(self, x):
assert x is not self
if (x.has_queue_name()): self.set_queue_name(x.queue_name())
if (x.has_lease_seconds()): self.set_lease_seconds(x.lease_seconds())
if (x.has_max_tasks()): self.set_max_tasks(x.max_tasks())
if (x.has_group_by_tag()): self.set_group_by_tag(x.group_by_tag())
if (x.has_tag()): self.set_tag(x.tag())
def Equals(self, x):
if x is self: return 1
if self.has_queue_name_ != x.has_queue_name_: return 0
if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
if self.has_lease_seconds_ != x.has_lease_seconds_: return 0
if self.has_lease_seconds_ and self.lease_seconds_ != x.lease_seconds_: return 0
if self.has_max_tasks_ != x.has_max_tasks_: return 0
if self.has_max_tasks_ and self.max_tasks_ != x.max_tasks_: return 0
if self.has_group_by_tag_ != x.has_group_by_tag_: return 0
if self.has_group_by_tag_ and self.group_by_tag_ != x.group_by_tag_: return 0
if self.has_tag_ != x.has_tag_: return 0
if self.has_tag_ and self.tag_ != x.tag_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_queue_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: queue_name not set.')
if (not self.has_lease_seconds_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: lease_seconds not set.')
if (not self.has_max_tasks_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: max_tasks not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.queue_name_))
n += self.lengthVarInt64(self.max_tasks_)
if (self.has_group_by_tag_): n += 2
if (self.has_tag_): n += 1 + self.lengthString(len(self.tag_))
return n + 11
def ByteSizePartial(self):
n = 0
if (self.has_queue_name_):
n += 1
n += self.lengthString(len(self.queue_name_))
if (self.has_lease_seconds_):
n += 9
if (self.has_max_tasks_):
n += 1
n += self.lengthVarInt64(self.max_tasks_)
if (self.has_group_by_tag_): n += 2
if (self.has_tag_): n += 1 + self.lengthString(len(self.tag_))
return n
def Clear(self):
self.clear_queue_name()
self.clear_lease_seconds()
self.clear_max_tasks()
self.clear_group_by_tag()
self.clear_tag()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.queue_name_)
out.putVarInt32(17)
out.putDouble(self.lease_seconds_)
out.putVarInt32(24)
out.putVarInt64(self.max_tasks_)
if (self.has_group_by_tag_):
out.putVarInt32(32)
out.putBoolean(self.group_by_tag_)
if (self.has_tag_):
out.putVarInt32(42)
out.putPrefixedString(self.tag_)
def OutputPartial(self, out):
if (self.has_queue_name_):
out.putVarInt32(10)
out.putPrefixedString(self.queue_name_)
if (self.has_lease_seconds_):
out.putVarInt32(17)
out.putDouble(self.lease_seconds_)
if (self.has_max_tasks_):
out.putVarInt32(24)
out.putVarInt64(self.max_tasks_)
if (self.has_group_by_tag_):
out.putVarInt32(32)
out.putBoolean(self.group_by_tag_)
if (self.has_tag_):
out.putVarInt32(42)
out.putPrefixedString(self.tag_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_queue_name(d.getPrefixedString())
continue
if tt == 17:
self.set_lease_seconds(d.getDouble())
continue
if tt == 24:
self.set_max_tasks(d.getVarInt64())
continue
if tt == 32:
self.set_group_by_tag(d.getBoolean())
continue
if tt == 42:
self.set_tag(d.getPrefixedString())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
if self.has_lease_seconds_: res+=prefix+("lease_seconds: %s\n" % self.DebugFormat(self.lease_seconds_))
if self.has_max_tasks_: res+=prefix+("max_tasks: %s\n" % self.DebugFormatInt64(self.max_tasks_))
if self.has_group_by_tag_: res+=prefix+("group_by_tag: %s\n" % self.DebugFormatBool(self.group_by_tag_))
if self.has_tag_: res+=prefix+("tag: %s\n" % self.DebugFormatString(self.tag_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kqueue_name = 1
klease_seconds = 2
kmax_tasks = 3
kgroup_by_tag = 4
ktag = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "queue_name",
2: "lease_seconds",
3: "max_tasks",
4: "group_by_tag",
5: "tag",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.DOUBLE,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueQueryAndOwnTasksRequest'
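# Editor's note (illustrative, not generated code): lease_seconds is a double,
# written as tag 17 ((2 << 3) | 1, the 64-bit wire type) followed by 8 fixed
# bytes, which is why ByteSizePartial() above charges a constant 9 for it.
def _example_double_field_size():
  tag_bytes = 1      # out.putVarInt32(17)
  payload_bytes = 8  # out.putDouble(...)
  return tag_bytes + payload_bytes  # 9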
class TaskQueueQueryAndOwnTasksResponse_Task(ProtocolBuffer.ProtocolMessage):
has_task_name_ = 0
task_name_ = ""
has_eta_usec_ = 0
eta_usec_ = 0
has_retry_count_ = 0
retry_count_ = 0
has_body_ = 0
body_ = ""
has_tag_ = 0
tag_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def task_name(self): return self.task_name_
def set_task_name(self, x):
self.has_task_name_ = 1
self.task_name_ = x
def clear_task_name(self):
if self.has_task_name_:
self.has_task_name_ = 0
self.task_name_ = ""
def has_task_name(self): return self.has_task_name_
def eta_usec(self): return self.eta_usec_
def set_eta_usec(self, x):
self.has_eta_usec_ = 1
self.eta_usec_ = x
def clear_eta_usec(self):
if self.has_eta_usec_:
self.has_eta_usec_ = 0
self.eta_usec_ = 0
def has_eta_usec(self): return self.has_eta_usec_
def retry_count(self): return self.retry_count_
def set_retry_count(self, x):
self.has_retry_count_ = 1
self.retry_count_ = x
def clear_retry_count(self):
if self.has_retry_count_:
self.has_retry_count_ = 0
self.retry_count_ = 0
def has_retry_count(self): return self.has_retry_count_
def body(self): return self.body_
def set_body(self, x):
self.has_body_ = 1
self.body_ = x
def clear_body(self):
if self.has_body_:
self.has_body_ = 0
self.body_ = ""
def has_body(self): return self.has_body_
def tag(self): return self.tag_
def set_tag(self, x):
self.has_tag_ = 1
self.tag_ = x
def clear_tag(self):
if self.has_tag_:
self.has_tag_ = 0
self.tag_ = ""
def has_tag(self): return self.has_tag_
def MergeFrom(self, x):
assert x is not self
if (x.has_task_name()): self.set_task_name(x.task_name())
if (x.has_eta_usec()): self.set_eta_usec(x.eta_usec())
if (x.has_retry_count()): self.set_retry_count(x.retry_count())
if (x.has_body()): self.set_body(x.body())
if (x.has_tag()): self.set_tag(x.tag())
def Equals(self, x):
if x is self: return 1
if self.has_task_name_ != x.has_task_name_: return 0
if self.has_task_name_ and self.task_name_ != x.task_name_: return 0
if self.has_eta_usec_ != x.has_eta_usec_: return 0
if self.has_eta_usec_ and self.eta_usec_ != x.eta_usec_: return 0
if self.has_retry_count_ != x.has_retry_count_: return 0
if self.has_retry_count_ and self.retry_count_ != x.retry_count_: return 0
if self.has_body_ != x.has_body_: return 0
if self.has_body_ and self.body_ != x.body_: return 0
if self.has_tag_ != x.has_tag_: return 0
if self.has_tag_ and self.tag_ != x.tag_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_task_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: task_name not set.')
if (not self.has_eta_usec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: eta_usec not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.task_name_))
n += self.lengthVarInt64(self.eta_usec_)
if (self.has_retry_count_): n += 1 + self.lengthVarInt64(self.retry_count_)
if (self.has_body_): n += 1 + self.lengthString(len(self.body_))
if (self.has_tag_): n += 1 + self.lengthString(len(self.tag_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_task_name_):
n += 1
n += self.lengthString(len(self.task_name_))
if (self.has_eta_usec_):
n += 1
n += self.lengthVarInt64(self.eta_usec_)
if (self.has_retry_count_): n += 1 + self.lengthVarInt64(self.retry_count_)
if (self.has_body_): n += 1 + self.lengthString(len(self.body_))
if (self.has_tag_): n += 1 + self.lengthString(len(self.tag_))
return n
def Clear(self):
self.clear_task_name()
self.clear_eta_usec()
self.clear_retry_count()
self.clear_body()
self.clear_tag()
def OutputUnchecked(self, out):
out.putVarInt32(18)
out.putPrefixedString(self.task_name_)
out.putVarInt32(24)
out.putVarInt64(self.eta_usec_)
if (self.has_retry_count_):
out.putVarInt32(32)
out.putVarInt32(self.retry_count_)
if (self.has_body_):
out.putVarInt32(42)
out.putPrefixedString(self.body_)
if (self.has_tag_):
out.putVarInt32(50)
out.putPrefixedString(self.tag_)
def OutputPartial(self, out):
if (self.has_task_name_):
out.putVarInt32(18)
out.putPrefixedString(self.task_name_)
if (self.has_eta_usec_):
out.putVarInt32(24)
out.putVarInt64(self.eta_usec_)
if (self.has_retry_count_):
out.putVarInt32(32)
out.putVarInt32(self.retry_count_)
if (self.has_body_):
out.putVarInt32(42)
out.putPrefixedString(self.body_)
if (self.has_tag_):
out.putVarInt32(50)
out.putPrefixedString(self.tag_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 12: break
if tt == 18:
self.set_task_name(d.getPrefixedString())
continue
if tt == 24:
self.set_eta_usec(d.getVarInt64())
continue
if tt == 32:
self.set_retry_count(d.getVarInt32())
continue
if tt == 42:
self.set_body(d.getPrefixedString())
continue
if tt == 50:
self.set_tag(d.getPrefixedString())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_task_name_: res+=prefix+("task_name: %s\n" % self.DebugFormatString(self.task_name_))
if self.has_eta_usec_: res+=prefix+("eta_usec: %s\n" % self.DebugFormatInt64(self.eta_usec_))
if self.has_retry_count_: res+=prefix+("retry_count: %s\n" % self.DebugFormatInt32(self.retry_count_))
if self.has_body_: res+=prefix+("body: %s\n" % self.DebugFormatString(self.body_))
if self.has_tag_: res+=prefix+("tag: %s\n" % self.DebugFormatString(self.tag_))
return res
class TaskQueueQueryAndOwnTasksResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.task_ = []
if contents is not None: self.MergeFromString(contents)
def task_size(self): return len(self.task_)
def task_list(self): return self.task_
def task(self, i):
return self.task_[i]
def mutable_task(self, i):
return self.task_[i]
def add_task(self):
x = TaskQueueQueryAndOwnTasksResponse_Task()
self.task_.append(x)
return x
def clear_task(self):
self.task_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.task_size()): self.add_task().CopyFrom(x.task(i))
def Equals(self, x):
if x is self: return 1
if len(self.task_) != len(x.task_): return 0
for e1, e2 in zip(self.task_, x.task_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.task_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 2 * len(self.task_)
for i in xrange(len(self.task_)): n += self.task_[i].ByteSize()
return n
def ByteSizePartial(self):
n = 0
n += 2 * len(self.task_)
for i in xrange(len(self.task_)): n += self.task_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_task()
def OutputUnchecked(self, out):
for i in xrange(len(self.task_)):
out.putVarInt32(11)
self.task_[i].OutputUnchecked(out)
out.putVarInt32(12)
def OutputPartial(self, out):
for i in xrange(len(self.task_)):
out.putVarInt32(11)
self.task_[i].OutputPartial(out)
out.putVarInt32(12)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 11:
self.add_task().TryMerge(d)
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.task_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Task%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kTaskGroup = 1
kTasktask_name = 2
kTasketa_usec = 3
kTaskretry_count = 4
kTaskbody = 5
kTasktag = 6
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "Task",
2: "task_name",
3: "eta_usec",
4: "retry_count",
5: "body",
6: "tag",
}, 6)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STARTGROUP,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.STRING,
}, 6, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueQueryAndOwnTasksResponse'
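# Editor's sketch (not part of the generated module): the two messages above
# form the pull-queue lease API.  A hypothetical caller might do the
# following; the MakeSyncCall line is an assumption about the surrounding
# SDK, not something verified in this file.
def _example_lease_tasks(queue_name, max_tasks, lease_seconds):
  req = TaskQueueQueryAndOwnTasksRequest()
  req.set_queue_name(queue_name)
  req.set_lease_seconds(lease_seconds)
  req.set_max_tasks(max_tasks)
  resp = TaskQueueQueryAndOwnTasksResponse()
  # apiproxy_stub_map.MakeSyncCall('taskqueue', 'QueryAndOwnTasks', req, resp)
  return [resp.task(i) for i in xrange(resp.task_size())]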
class TaskQueueModifyTaskLeaseRequest(ProtocolBuffer.ProtocolMessage):
has_queue_name_ = 0
queue_name_ = ""
has_task_name_ = 0
task_name_ = ""
has_eta_usec_ = 0
eta_usec_ = 0
has_lease_seconds_ = 0
lease_seconds_ = 0.0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def queue_name(self): return self.queue_name_
def set_queue_name(self, x):
self.has_queue_name_ = 1
self.queue_name_ = x
def clear_queue_name(self):
if self.has_queue_name_:
self.has_queue_name_ = 0
self.queue_name_ = ""
def has_queue_name(self): return self.has_queue_name_
def task_name(self): return self.task_name_
def set_task_name(self, x):
self.has_task_name_ = 1
self.task_name_ = x
def clear_task_name(self):
if self.has_task_name_:
self.has_task_name_ = 0
self.task_name_ = ""
def has_task_name(self): return self.has_task_name_
def eta_usec(self): return self.eta_usec_
def set_eta_usec(self, x):
self.has_eta_usec_ = 1
self.eta_usec_ = x
def clear_eta_usec(self):
if self.has_eta_usec_:
self.has_eta_usec_ = 0
self.eta_usec_ = 0
def has_eta_usec(self): return self.has_eta_usec_
def lease_seconds(self): return self.lease_seconds_
def set_lease_seconds(self, x):
self.has_lease_seconds_ = 1
self.lease_seconds_ = x
def clear_lease_seconds(self):
if self.has_lease_seconds_:
self.has_lease_seconds_ = 0
self.lease_seconds_ = 0.0
def has_lease_seconds(self): return self.has_lease_seconds_
def MergeFrom(self, x):
assert x is not self
if (x.has_queue_name()): self.set_queue_name(x.queue_name())
if (x.has_task_name()): self.set_task_name(x.task_name())
if (x.has_eta_usec()): self.set_eta_usec(x.eta_usec())
if (x.has_lease_seconds()): self.set_lease_seconds(x.lease_seconds())
def Equals(self, x):
if x is self: return 1
if self.has_queue_name_ != x.has_queue_name_: return 0
if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
if self.has_task_name_ != x.has_task_name_: return 0
if self.has_task_name_ and self.task_name_ != x.task_name_: return 0
if self.has_eta_usec_ != x.has_eta_usec_: return 0
if self.has_eta_usec_ and self.eta_usec_ != x.eta_usec_: return 0
if self.has_lease_seconds_ != x.has_lease_seconds_: return 0
if self.has_lease_seconds_ and self.lease_seconds_ != x.lease_seconds_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_queue_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: queue_name not set.')
if (not self.has_task_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: task_name not set.')
if (not self.has_eta_usec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: eta_usec not set.')
if (not self.has_lease_seconds_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: lease_seconds not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.queue_name_))
n += self.lengthString(len(self.task_name_))
n += self.lengthVarInt64(self.eta_usec_)
return n + 12
def ByteSizePartial(self):
n = 0
if (self.has_queue_name_):
n += 1
n += self.lengthString(len(self.queue_name_))
if (self.has_task_name_):
n += 1
n += self.lengthString(len(self.task_name_))
if (self.has_eta_usec_):
n += 1
n += self.lengthVarInt64(self.eta_usec_)
if (self.has_lease_seconds_):
n += 9
return n
def Clear(self):
self.clear_queue_name()
self.clear_task_name()
self.clear_eta_usec()
self.clear_lease_seconds()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.queue_name_)
out.putVarInt32(18)
out.putPrefixedString(self.task_name_)
out.putVarInt32(24)
out.putVarInt64(self.eta_usec_)
out.putVarInt32(33)
out.putDouble(self.lease_seconds_)
def OutputPartial(self, out):
if (self.has_queue_name_):
out.putVarInt32(10)
out.putPrefixedString(self.queue_name_)
if (self.has_task_name_):
out.putVarInt32(18)
out.putPrefixedString(self.task_name_)
if (self.has_eta_usec_):
out.putVarInt32(24)
out.putVarInt64(self.eta_usec_)
if (self.has_lease_seconds_):
out.putVarInt32(33)
out.putDouble(self.lease_seconds_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_queue_name(d.getPrefixedString())
continue
if tt == 18:
self.set_task_name(d.getPrefixedString())
continue
if tt == 24:
self.set_eta_usec(d.getVarInt64())
continue
if tt == 33:
self.set_lease_seconds(d.getDouble())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
if self.has_task_name_: res+=prefix+("task_name: %s\n" % self.DebugFormatString(self.task_name_))
if self.has_eta_usec_: res+=prefix+("eta_usec: %s\n" % self.DebugFormatInt64(self.eta_usec_))
if self.has_lease_seconds_: res+=prefix+("lease_seconds: %s\n" % self.DebugFormat(self.lease_seconds_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kqueue_name = 1
ktask_name = 2
keta_usec = 3
klease_seconds = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "queue_name",
2: "task_name",
3: "eta_usec",
4: "lease_seconds",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.DOUBLE,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueModifyTaskLeaseRequest'
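# Editor's sketch (not part of the generated module): extending a lease means
# echoing back the task's current eta_usec, which the backend appears to use
# to reject stale leases (note that all four fields are required above).
def _example_extend_lease(queue_name, task_name, current_eta_usec, seconds):
  req = TaskQueueModifyTaskLeaseRequest()
  req.set_queue_name(queue_name)
  req.set_task_name(task_name)
  req.set_eta_usec(current_eta_usec)
  req.set_lease_seconds(seconds)
  return req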
class TaskQueueModifyTaskLeaseResponse(ProtocolBuffer.ProtocolMessage):
has_updated_eta_usec_ = 0
updated_eta_usec_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def updated_eta_usec(self): return self.updated_eta_usec_
def set_updated_eta_usec(self, x):
self.has_updated_eta_usec_ = 1
self.updated_eta_usec_ = x
def clear_updated_eta_usec(self):
if self.has_updated_eta_usec_:
self.has_updated_eta_usec_ = 0
self.updated_eta_usec_ = 0
def has_updated_eta_usec(self): return self.has_updated_eta_usec_
def MergeFrom(self, x):
assert x is not self
if (x.has_updated_eta_usec()): self.set_updated_eta_usec(x.updated_eta_usec())
def Equals(self, x):
if x is self: return 1
if self.has_updated_eta_usec_ != x.has_updated_eta_usec_: return 0
if self.has_updated_eta_usec_ and self.updated_eta_usec_ != x.updated_eta_usec_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_updated_eta_usec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: updated_eta_usec not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.updated_eta_usec_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_updated_eta_usec_):
n += 1
n += self.lengthVarInt64(self.updated_eta_usec_)
return n
def Clear(self):
self.clear_updated_eta_usec()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.updated_eta_usec_)
def OutputPartial(self, out):
if (self.has_updated_eta_usec_):
out.putVarInt32(8)
out.putVarInt64(self.updated_eta_usec_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_updated_eta_usec(d.getVarInt64())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_updated_eta_usec_: res+=prefix+("updated_eta_usec: %s\n" % self.DebugFormatInt64(self.updated_eta_usec_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kupdated_eta_usec = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "updated_eta_usec",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.TaskQueueModifyTaskLeaseResponse'
if _extension_runtime:
pass
__all__ = ['TaskQueueServiceError','TaskQueueRetryParameters','TaskQueueAcl','TaskQueueHttpHeader','TaskQueueMode','TaskQueueAddRequest','TaskQueueAddRequest_Header','TaskQueueAddRequest_CronTimetable','TaskQueueAddResponse','TaskQueueBulkAddRequest','TaskQueueBulkAddResponse','TaskQueueBulkAddResponse_TaskResult','TaskQueueDeleteRequest','TaskQueueDeleteResponse','TaskQueueForceRunRequest','TaskQueueForceRunResponse','TaskQueueUpdateQueueRequest','TaskQueueUpdateQueueResponse','TaskQueueFetchQueuesRequest','TaskQueueFetchQueuesResponse','TaskQueueFetchQueuesResponse_Queue','TaskQueueFetchQueueStatsRequest','TaskQueueScannerQueueInfo','TaskQueueFetchQueueStatsResponse','TaskQueueFetchQueueStatsResponse_QueueStats','TaskQueuePauseQueueRequest','TaskQueuePauseQueueResponse','TaskQueuePurgeQueueRequest','TaskQueuePurgeQueueResponse','TaskQueueDeleteQueueRequest','TaskQueueDeleteQueueResponse','TaskQueueDeleteGroupRequest','TaskQueueDeleteGroupResponse','TaskQueueQueryTasksRequest','TaskQueueQueryTasksResponse','TaskQueueQueryTasksResponse_TaskHeader','TaskQueueQueryTasksResponse_TaskCronTimetable','TaskQueueQueryTasksResponse_TaskRunLog','TaskQueueQueryTasksResponse_Task','TaskQueueFetchTaskRequest','TaskQueueFetchTaskResponse','TaskQueueUpdateStorageLimitRequest','TaskQueueUpdateStorageLimitResponse','TaskQueueQueryAndOwnTasksRequest','TaskQueueQueryAndOwnTasksResponse','TaskQueueQueryAndOwnTasksResponse_Task','TaskQueueModifyTaskLeaseRequest','TaskQueueModifyTaskLeaseResponse']
# ==== file: /EPAD/virtual/bin/gunicorn | repo: antomuli/Work_Pad | Python | no_license ====
#!/home/moringaschool/Documents/moringa-schools-projects/Capstone/EPAD/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
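# Editor's note (not part of the original wrapper): the re.sub above is the
# standard setuptools console-script cleanup.  It trims an optional
# "-script.py"/"-script.pyw" or ".exe" suffix from argv[0], e.g.
#   re.sub(r'(-script\.pyw?|\.exe)?$', '', 'gunicorn-script.py') -> 'gunicorn'
# so the program name gunicorn reports is the same on every platform.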
# ==== file: /testforothers/testSentence.py | repo: wiky2/mytestproject | Python | no_license ====
#!/usr/bin/env python
# coding: utf-8
# The encoding declaration must appear within the first two lines: # coding=<encoding name>
'''
@author:
@license:
@contact:
@software: Test
@file: testSentence.py
@time: 2017/8/27 00:28
@desc: studying laowang-python - control statements
Notes on Python statements
1. the print statement
1.1 basic output
1.2 the trailing comma in print
1.3 redirecting output to a file; >> is the redirect
2. control flow statements (control flow)
2.1 made up of a condition plus a block of code to execute
2.1.1 conditions cover decisions, loops and branches
2.2 format (never forget the colon and the 4 spaces)
2.3 if, while, for and functions are all control flow
3. boolean values
3.1 control flow is closely tied to truth values
3.1.1 do not confuse truthiness with the bool type
3.2 the most basic boolean operators
3.2.1 and
3.2.2 or
3.2.3 is -- checks identity (shared object)
3.2.4 == -- checks value
3.2.5 not
3.2.6 assorted other comparison operators
4. the if statement (a control flow statement)
4.1 parts of if: if, else, elif, pass
4.1.1 if and elif replace switch
4.1.2 pass
4.2 clever tricks: the ternary expression
4.2.1 x if cond else y
4.2.2 clever use of a list
4.2.3 treat ternary expressions as a toy, nothing more
'''
f=open('printtest.txt','w')
print >>f,'hahahahaha'
print >>f,'hahahahaha'
f.close()
x=3
if x:
print 4
if x is True:
    print 5  # one is an int, the other a bool; they are different objects -- "is" checks whether both names reference the same object
print True and False
print True & False
print 4 if True else 3
print [4,3][True]  # [answer_if_false, answer_if_true][condition]
for x in 'i am lilei':
print x
for x in 'i am lilei'.split(' '):
print x
for x in 'i am lilei'.split(' '):
continue
print x
else:
print 'end-----'
print True and False and False and True  # evaluated left to right; "and" short-circuits at the first False
a='aAsfASD'
print a.swapcase()  # swaps upper case to lower and vice versa
a='aA141safd2afa534234vAUJADAWWEGFDfgiuRHIUOIKNLNey52fA78447SD'
print ''.join([s for s in a if s.isdigit()])
a=a.lower()
print dict([(x,a.count(x)) for x in set(a)])  # count how many times each character occurs
a_list=list(a)
set_list=list(set(a_list))
set_list.sort(key=a_list.index)  # index is a function (bound method), so items sort by first appearance
print ''.join(set_list)
print a[::-1]
a='aA141safd2afa534234vAUJADAWWEGFDfgiuRHIUOIKNLNey52fA78447SD'
l=sorted(a)
a_upper_list=[]
a_lower_list=[]
for x in l:
if x.isupper():
a_upper_list.append(x)
elif x.islower():
a_lower_list.append(x)
else:
pass
for y in a_upper_list:
y_lower=y.lower()
if y_lower in a_lower_list:
a_lower_list.insert(a_lower_list.index(y_lower),y)
print ''.join(a_lower_list)
a='aA141safd2afa534234vAUJADAWWEGFDfgiuRHIUOIKNLNey52fA78447SD'
search='boy'
u=set(a)
u.update(list(search))
print len(set(a))== len(u)
a='aA141safd2afa534234vAUJADAWWEGFDfgiuRHIUOIKNLNey52fA78447SD'
search=['boy','girl']
u=set(a)
for s in search:
u.update(list(s))
print len(set(a))== len(u)
a='aA141safd2afa534234vAUJADAWWEGFDfgiuRHIUOIKNLNey52fA78447SD'
l=[(x,a.count(x)) for x in set(a)]  # (character, count) pairs
l.sort(key=lambda k:k[1],reverse=True)  # k[1] is the second element (indexing starts at 0), i.e. the count
print l[0][0]
print l
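# Editor's note: an equivalent one-liner for the most frequent character is
# max(set(a), key=a.count); the sort above additionally keeps the whole
# frequency ranking in l.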
import os
m=os.popen('python -m this').read()
m=m.replace('\n','')
l=m.split(' ')
print [(x,l.count(x)) for x in ['be','this','than']]
size=1023147201
print '%s kb' % (size >>10)
print '%s mb' % (size >>20)
a=[1,2,3,6,8,9,10,14,17]
print str(a)#[1, 2, 3, 6, 8, 9, 10, 14, 17]
print ''.join(str(a))
print str(a)[1:-1:3]#多位数不行
print str(a)[1:-1].replace(', ','')  # strip the brackets first, then drop the comma-space separators
a={'key1':'value1','key2':'value2'}
for i in a.keys():
print i
a={'key1':'value1','key2':'value2'}
for x,y in a.items():
print x,y
a={'a':'haha','b':'xixi','d':'haha'}
search_value='haha'
key_list=[]
for x,y in a.items():
if y==search_value:
key_list.append(x)
print key_list
import string
a='aA141safd2afa534234vAUJADAWWEGFDfgiuRHIUOIKNLNey52fA78447SD'
a=''.join([x for x in a if not x.isdigit()])
print sorted(a,key=string.upper)
a='i am lilei. We need to go'
c=string.maketrans('i','I')  # each char of the first argument maps to the matching char of the second
b=a.translate(c,'lei')  # the second argument lists chars to delete (deletion happens before mapping); translate returns a new string, so assign it
print b
with open('printtest.txt','a') as g:  # the with-statement closes the file automatically
g.write('xixixi')
| [
"[email protected]"
] | |
53284561112646ecce78b97cfe2b53bf34de1289 | 324764c9dba09bb3c2c5af24db292cd27d9e81c8 | /2018/09/part1.py | 113581e23ee454494dc922ff94b6f92099555fe8 | [] | no_license | thatsokay/advent-of-code | cebcde362e7f0f4b0587e7e2c01d874e886dcd5e | 3781b9da93e3064f28f11f22db374cf896159488 | refs/heads/master | 2022-12-24T23:48:43.549678 | 2022-12-21T15:55:39 | 2022-12-22T05:04:45 | 161,293,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | from operator import itemgetter
from collections import defaultdict, deque
def marbles(players, last):
    circle = deque([0])  # the current marble is kept at the left end of the deque
scores = defaultdict(int)
for marble in range(1, last + 1):
if marble % 23 == 0:
circle.rotate(7)
scores[marble % players + 1] += marble + circle.popleft()
else:
circle.rotate(-2)
circle.appendleft(marble)
return max(scores.values())
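
# Sanity check from the puzzle statement: 9 players with a last marble worth
# 25 points yield a high score of 32, i.e. marbles(9, 25) == 32.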
if __name__ == '__main__':
with open('input.txt') as f:
players, last = map(int, itemgetter(0, 6)(f.readline().split()))
print(marbles(players, last))
| [
"[email protected]"
] | |
728f58df08533b6cc626d7f82176ccdf3635a4ad | 11228a51cf7bfe3cef852efb0de393ae42c768f2 | /rules/genome.seq.smk | 85ff349ed2a8d402eb8f7c0436de7bbd72d5e6d6 | [
"MIT"
] | permissive | orionzhou/snk | cd36c6ced8fb514101b1de7c70f322e76368e760 | 5ead8aebf5ed00a2aec15363b8023c9b75b0ed4a | refs/heads/master | 2021-07-13T12:08:53.168670 | 2020-06-01T05:19:46 | 2020-06-01T05:19:46 | 133,903,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,282 | smk | def fasta_input(w):
genome = w.genome
if config['x'][genome]['hybrid']:
genome1, genome2 = genome.split('x')
return ["%s/%s/%s" % (g, config['db']['fasta']['xdir'], config['db']['fasta']['ref']) for g in (genome1, genome2)]
else:
return "%s/download/raw.fna" % genome
rule fasta:
input: fasta_input
output:
fna = "{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['ref']),
fai = "{genome}/%s/%s.fai" % (config['db']['fasta']['xdir'], config['db']['fasta']['ref']),
chrom_size = "{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['chrom_size']),
chrom_bed = "{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['chrom_bed']),
gap = "{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['gap']),
fchain = "{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['fchain']),
bchain = "{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['bchain']),
params:
wdir = "{genome}",
odir = "08_seq_map",
opt = lambda w: w.genome,
N = "{genome}.%s" % config['fasta']['id'],
e = "{genome}/%s/%s.e" % (config['dirj'], config['fasta']['id']),
o = "{genome}/%s/%s.o" % (config['dirj'], config['fasta']['id']),
j = lambda w: get_resource(w, config, 'fasta'),
resources: attempt = lambda w, attempt: attempt
threads: lambda w: get_resource(w, config, 'fasta')['ppn']
conda: "../envs/work.yml"
script: "../scripts/make_fasta.py"
rule blat_index:
input:
"{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['ref'])
output:
"{genome}/%s/%s" % (config['db']['blat']['xdir'], config['db']['blat']['x.2bit']),
"{genome}/%s/%s" % (config['db']['blat']['xdir'], config['db']['blat']['x.ooc']),
params:
odir = "{genome}/%s" % config['db']['blat']['xdir'],
N = "{genome}.%s" % config['blat_index']['id'],
e = "{genome}/%s/%s.e" % (config['dirj'], config['blat_index']['id']),
o = "{genome}/%s/%s.o" % (config['dirj'], config['blat_index']['id']),
j = lambda w: get_resource(w, config, 'blat_index'),
resources: attempt = lambda w, attempt: attempt
threads: lambda w: get_resource(w, config, 'blat_index')['ppn']
conda: "../envs/work.yml"
shell:
"""
rm -rf {params.odir}
mkdir -p {params.odir}
faToTwoBit {input} {output[0]}
blat {output[0]} tmp.fas tmp.out -makeOoc={output[1]}
"""
rule blast_index:
input:
fna = "{genome}/%s/%s" % (config['db']['annotation']['xdir'], config['db']['annotation']['lfna']),
faa = "{genome}/%s/%s" % (config['db']['annotation']['xdir'], config['db']['annotation']['lfaa']),
output:
"{genome}/%s/%s" % (config['db']['blastn']['xdir'], config['db']['blastn']['xout']),
"{genome}/%s/%s" % (config['db']['blastp']['xdir'], config['db']['blastp']['xout']),
params:
odir1 = "{genome}/%s" % config['db']['blastn']['xdir'],
odir2 = "{genome}/%s" % config['db']['blastp']['xdir'],
N = "{genome}.%s" % config['blast_index']['id'],
e = "{genome}/%s/%s.e" % (config['dirj'], config['blast_index']['id']),
o = "{genome}/%s/%s.o" % (config['dirj'], config['blast_index']['id']),
j = lambda w: get_resource(w, config, 'blast_index'),
resources: attempt = lambda w, attempt: attempt
threads: lambda w: get_resource(w, config, 'blast_index')['ppn']
conda: "../envs/blast.yml"
shell:
"""
makeblastdb -dbtype nucl -in {input.fna} -title db -out {params.odir1}/db
makeblastdb -dbtype prot -in {input.faa} -title db -out {params.odir2}/db
"""
rule last_index:
input:
fna = "{genome}/%s/%s" % (config['db']['annotation']['xdir'], config['db']['annotation']['lfna']),
faa = "{genome}/%s/%s" % (config['db']['annotation']['xdir'], config['db']['annotation']['lfaa']),
output:
"{genome}/%s/%s" % (config['db']['lastn']['xdir'], config['db']['lastn']['xout']),
"{genome}/%s/%s" % (config['db']['lastp']['xdir'], config['db']['lastp']['xout']),
params:
odir1 = "{genome}/%s" % config['db']['lastn']['xdir'],
odir2 = "{genome}/%s" % config['db']['lastp']['xdir'],
extra = "",
xpre1 = "{genome}/%s/%s" % (config['db']['lastn']['xdir'], config['db']['lastn']['xpre']),
xpre2 = "{genome}/%s/%s" % (config['db']['lastp']['xdir'], config['db']['lastp']['xpre']),
N = "{genome}.%s" % config['last_index']['id'],
e = "{genome}/%s/%s.e" % (config['dirj'], config['last_index']['id']),
o = "{genome}/%s/%s.o" % (config['dirj'], config['last_index']['id']),
j = lambda w: get_resource(w, config, 'last_index'),
resources: attempt = lambda w, attempt: attempt
threads: lambda w: get_resource(w, config, 'last_index')['ppn']
conda: "../envs/work.yml"
shell:
"""
lastdb {params.extra} {params.xpre1} {input.fna}
lastdb -p {params.extra} {params.xpre2} {input.faa}
"""
rule bwa_index:
input:
"{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['ref'])
output:
"{genome}/%s/%s" % (config['db']['bwa']['xdir'], config['db']['bwa']['xout'])
params:
odir = "{genome}/%s" % config['db']['bwa']['xdir'],
N = "{genome}.%s" % config['bwa_index']['id'],
e = "{genome}/%s/%s.e" % (config['dirj'], config['bwa_index']['id']),
o = "{genome}/%s/%s.o" % (config['dirj'], config['bwa_index']['id']),
j = lambda w: get_resource(w, config, 'bwa_index'),
resources: attempt = lambda w, attempt: attempt
threads: lambda w: get_resource(w, config, 'bwa_index')['ppn']
conda: "../envs/work.yml"
shell:
"""
rm -rf {params.odir}
mkdir -p {params.odir}
bwa index -a bwtsw -p {params.odir}/db {input}
"""
rule bismark_index:
input:
"{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['ref'])
output:
"{genome}/%s/%s" % (config['db']['bismark']['xdir'], config['db']['bismark']['xout'])
params:
odir = "{genome}/%s" % config['db']['bismark']['xdir'],
parallel = lambda w: get_resource(w, config, 'bismark_index')['ppn'] / 2,
N = "{genome}.%s" % config['bismark_index']['id'],
e = "{genome}/%s/%s.e" % (config['dirj'], config['bismark_index']['id']),
o = "{genome}/%s/%s.o" % (config['dirj'], config['bismark_index']['id']),
j = lambda w: get_resource(w, config, 'bismark_index'),
resources: attempt = lambda w, attempt: attempt
threads: lambda w: get_resource(w, config, 'bismark_index')['ppn']
conda: "../envs/bismark.yml"
shell:
"""
rm -rf {params.odir}
mkdir -p {params.odir}
cd {params.odir}
ln -sf ../../10_genome.fna db.fa
bismark_genome_preparation --bowtie2 .
"""
rule star_index:
input:
fna = "{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['ref']),
gtf = "{genome}/%s/%s" % (config['db']['annotation']['xdir'], config['db']['annotation']['gtf'])
output:
"{genome}/%s/%s" % (config['db']['star']['xdir'], config['db']['star']['xout'])
params:
odir = "{genome}/%s" % config['db']['star']['xdir'],
N = "{genome}.%s" % config['star_index']['id'],
e = "{genome}/%s/%s.e" % (config['dirj'], config['star_index']['id']),
o = "{genome}/%s/%s.o" % (config['dirj'], config['star_index']['id']),
j = lambda w: get_resource(w, config, 'star_index'),
resources: attempt = lambda w, attempt: attempt
threads: lambda w: get_resource(w, config, 'star_index')['ppn']
conda: "../envs/work.yml"
shell:
"""
rm -rf {params.odir}
mkdir -p {params.odir}
STAR --runThreadN {threads} --runMode genomeGenerate \
--genomeDir {params.odir}/ \
--genomeFastaFiles {input.fna} --sjdbGTFfile {input.gtf}
"""
rule gatk_index:
input:
"{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['ref'])
output:
"{genome}/%s/%s" % (config['db']['gatk']['xdir'], config['db']['gatk']['xref']),
"{genome}/%s/%s" % (config['db']['gatk']['xdir'], config['db']['gatk']['xref.dict']),
params:
odir = "{genome}/%s" % config['db']['gatk']['xdir'],
N = "{genome}.%s" % config['gatk_index']['id'],
e = "{genome}/%s/%s.e" % (config['dirj'], config['gatk_index']['id']),
o = "{genome}/%s/%s.o" % (config['dirj'], config['gatk_index']['id']),
j = lambda w: get_resource(w, config, 'gatk_index'),
mem = lambda w: get_resource(w, config, 'gatk_index')['mem'],
resources: attempt = lambda w, attempt: attempt
threads: lambda w: get_resource(w, config, 'gatk_index')['ppn']
conda: "../envs/work.yml"
shell:
"""
rm -rf {params.odir}
mkdir -p {params.odir}
cp -f {input} {output[0]}
gatk CreateSequenceDictionary -R {output[0]}
samtools faidx {output[0]}
"""
rule hisat2_index:
input:
fna = "{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['ref']),
gtf = "{genome}/%s/%s" % (config['db']['annotation']['xdir'], config['db']['annotation']['gtf'])
output:
"{genome}/%s/%s" % (config['db']['hisat2']['xdir'], config['db']['hisat2']['xout'])
params:
odir = "{genome}/%s" % config['db']['hisat2']['xdir'],
N = "{genome}.%s" % config['hisat2_index']['id'],
e = "{genome}/%s/%s.e" % (config['dirj'], config['hisat2_index']['id']),
o = "{genome}/%s/%s.o" % (config['dirj'], config['hisat2_index']['id']),
j = lambda w: get_resource(w, config, 'hisat2_index'),
resources: attempt = lambda w, attempt: attempt
threads: lambda w: get_resource(w, config, 'hisat2_index')['ppn']
conda: "../envs/hisat2.yml"
shell:
"""
rm -rf {params.odir}
mkdir -p {params.odir}
hisat2_extract_exons.py {input.gtf} > {params.odir}/db.exon
hisat2_extract_splice_sites.py {input.gtf} > {params.odir}/db.ss
hisat2-build -p {threads} --ss {params.odir}/db.ss \
--exon {params.odir}/db.exon {input.fna} {params.odir}/db
"""
rule salmon_index:
input:
fna = "{genome}/%s/%s" % (config['db']['annotation']['xdir'], config['db']['annotation']['fna'])
output:
"{genome}/%s/%s" % (config['db']['salmon']['xdir'], config['db']['salmon']['xout'])
params:
odir = "{genome}/%s" % config['db']['salmon']['xdir'],
pre = "{genome}/%s/%s" % (config['db']['salmon']['xdir'], config['db']['salmon']['xpre']),
N = "{genome}.%s" % config['salmon_index']['id'],
e = "{genome}/%s/%s.e" % (config['dirj'], config['salmon_index']['id']),
o = "{genome}/%s/%s.o" % (config['dirj'], config['salmon_index']['id']),
j = lambda w: get_resource(w, config, 'salmon_index'),
resources: attempt = lambda w, attempt: attempt
threads: lambda w: get_resource(w, config, 'salmon_index')['ppn']
conda: "../envs/work.yml"
shell:
#cut -f1,2 10.tsv | sed '1d' | sort -k1,1 -k2,2 | uniq | awk 'BEGIN{FS="\t";OFS=","}{print $2 $1 $1}' > tx2gene.csv
"""
rm -rf {params.odir}
mkdir -p {params.odir}
salmon index -p {threads} -t {input.fna} --gencode -i {params.pre}
"""
rule snpeff_index:
input:
fna = "{genome}/%s/%s" % (config['db']['fasta']['xdir'], config['db']['fasta']['ref']),
gff = "{genome}/%s/%s" % (config['db']['annotation']['xdir'], config['db']['annotation']['lgff']),
output:
"{genome}/%s/%s" % (config['db']['snpeff']['xdir'], config['db']['snpeff']['xcfg']),
"{genome}/%s/{genome}/%s" % (config['db']['snpeff']['xdir'], config['db']['snpeff']['xout'])
params:
fna = lambda w, input: op.abspath(input.fna),
gff = lambda w, input: op.abspath(input.gff),
odir = "{genome}/%s" % config['db']['snpeff']['xdir'],
odir1 = "{genome}/%s/{genome}" % config['db']['snpeff']['xdir'],
N = "{genome}.%s" % config['snpeff_index']['id'],
e = "{genome}/%s/%s.e" % (config['dirj'], config['snpeff_index']['id']),
o = "{genome}/%s/%s.o" % (config['dirj'], config['snpeff_index']['id']),
j = lambda w: get_resource(w, config, 'snpeff_index'),
mem = lambda w: get_resource(w, config, 'snpeff_index')['mem'],
resources: attempt = lambda w, attempt: attempt
threads: lambda w: get_resource(w, config, 'snpeff_index')['ppn']
conda: "../envs/work.yml"
shell:
"""
rm -rf {params.odir}
mkdir -p {params.odir}
mkdir -p {params.odir1}
echo 'data.dir = .' > {output[0]}
echo '{wildcards.genome}.genome : Zea mays' >> {output[0]}
ln -sf {params.fna} {params.odir1}/sequences.fa
ln -sf {params.gff} {params.odir1}/genes.gff
snpEff -Xmx{params.mem}G build -c {output[0]} -gff3 -v {wildcards.genome}
"""
| [
"[email protected]"
] | |
b219d3a5ef11dd41e0bb63b460398506e79ab6a0 | 9905901a2beae3ff4885fbc29842b3c34546ffd7 | /nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/db/dbdbprofile.py | 3cd40c4dba9ac1ae941fcb40a81581dd9d95a1e6 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | culbertm/NSttyPython | f354ebb3dbf445884dbddb474b34eb9246261c19 | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | refs/heads/master | 2020-04-22T17:07:39.654614 | 2019-02-13T19:07:23 | 2019-02-13T19:07:23 | 170,530,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,345 | py | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class dbdbprofile(base_resource) :
""" Configuration for DB profile resource. """
def __init__(self) :
self._name = None
self._interpretquery = None
self._stickiness = None
self._kcdaccount = None
self._conmultiplex = None
self._enablecachingconmuxoff = None
self._refcnt = None
self.___count = None
@property
def name(self) :
r"""Name for the database profile. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Cannot be changed after the profile is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my profile" or 'my profile'). .<br/>Minimum length = 1<br/>Maximum length = 127.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name for the database profile. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Cannot be changed after the profile is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my profile" or 'my profile'). .<br/>Minimum length = 1<br/>Maximum length = 127
"""
try :
self._name = name
except Exception as e:
raise e
@property
def interpretquery(self) :
r"""If ENABLED, inspect the query and update the connection information, if required. If DISABLED, forward the query to the server.<br/>Default value: YES<br/>Possible values = YES, NO.
"""
try :
return self._interpretquery
except Exception as e:
raise e
@interpretquery.setter
def interpretquery(self, interpretquery) :
r"""If ENABLED, inspect the query and update the connection information, if required. If DISABLED, forward the query to the server.<br/>Default value: YES<br/>Possible values = YES, NO
"""
try :
self._interpretquery = interpretquery
except Exception as e:
raise e
@property
def stickiness(self) :
r"""If the queries are related to each other, forward to the same backend server.<br/>Default value: NO<br/>Possible values = YES, NO.
"""
try :
return self._stickiness
except Exception as e:
raise e
@stickiness.setter
def stickiness(self, stickiness) :
r"""If the queries are related to each other, forward to the same backend server.<br/>Default value: NO<br/>Possible values = YES, NO
"""
try :
self._stickiness = stickiness
except Exception as e:
raise e
@property
def kcdaccount(self) :
r"""Name of the KCD account that is used for Windows authentication.<br/>Minimum length = 1<br/>Maximum length = 127.
"""
try :
return self._kcdaccount
except Exception as e:
raise e
@kcdaccount.setter
def kcdaccount(self, kcdaccount) :
r"""Name of the KCD account that is used for Windows authentication.<br/>Minimum length = 1<br/>Maximum length = 127
"""
try :
self._kcdaccount = kcdaccount
except Exception as e:
raise e
@property
def conmultiplex(self) :
r"""Use the same server-side connection for multiple client-side requests. Default is enabled.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._conmultiplex
except Exception as e:
raise e
@conmultiplex.setter
def conmultiplex(self, conmultiplex) :
r"""Use the same server-side connection for multiple client-side requests. Default is enabled.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._conmultiplex = conmultiplex
except Exception as e:
raise e
@property
def enablecachingconmuxoff(self) :
r"""Enable caching when connection multiplexing is OFF.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._enablecachingconmuxoff
except Exception as e:
raise e
@enablecachingconmuxoff.setter
def enablecachingconmuxoff(self, enablecachingconmuxoff) :
r"""Enable caching when connection multiplexing is OFF.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._enablecachingconmuxoff = enablecachingconmuxoff
except Exception as e:
raise e
@property
def refcnt(self) :
r"""Profile Reference Count.
"""
try :
return self._refcnt
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(dbdbprofile_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.dbdbprofile
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
r""" Use this API to add dbdbprofile.
"""
try :
if type(resource) is not list :
addresource = dbdbprofile()
addresource.name = resource.name
addresource.interpretquery = resource.interpretquery
addresource.stickiness = resource.stickiness
addresource.kcdaccount = resource.kcdaccount
addresource.conmultiplex = resource.conmultiplex
addresource.enablecachingconmuxoff = resource.enablecachingconmuxoff
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ dbdbprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].interpretquery = resource[i].interpretquery
addresources[i].stickiness = resource[i].stickiness
addresources[i].kcdaccount = resource[i].kcdaccount
addresources[i].conmultiplex = resource[i].conmultiplex
addresources[i].enablecachingconmuxoff = resource[i].enablecachingconmuxoff
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
r""" Use this API to delete dbdbprofile.
"""
try :
if type(resource) is not list :
deleteresource = dbdbprofile()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ dbdbprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ dbdbprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
r""" Use this API to update dbdbprofile.
"""
try :
if type(resource) is not list :
updateresource = dbdbprofile()
updateresource.name = resource.name
updateresource.interpretquery = resource.interpretquery
updateresource.stickiness = resource.stickiness
updateresource.kcdaccount = resource.kcdaccount
updateresource.conmultiplex = resource.conmultiplex
updateresource.enablecachingconmuxoff = resource.enablecachingconmuxoff
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ dbdbprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].interpretquery = resource[i].interpretquery
updateresources[i].stickiness = resource[i].stickiness
updateresources[i].kcdaccount = resource[i].kcdaccount
updateresources[i].conmultiplex = resource[i].conmultiplex
updateresources[i].enablecachingconmuxoff = resource[i].enablecachingconmuxoff
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
r""" Use this API to unset the properties of dbdbprofile resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = dbdbprofile()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ dbdbprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ dbdbprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the dbdbprofile resources that are configured on netscaler.
"""
try :
if not name :
obj = dbdbprofile()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = dbdbprofile()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [dbdbprofile() for _ in range(len(name))]
obj = [dbdbprofile() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = dbdbprofile()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of dbdbprofile resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = dbdbprofile()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the dbdbprofile resources configured on NetScaler.
"""
try :
obj = dbdbprofile()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of dbdbprofile resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = dbdbprofile()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Conmultiplex:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Enablecachingconmuxoff:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Stickiness:
YES = "YES"
NO = "NO"
class Interpretquery:
YES = "YES"
NO = "NO"
class dbdbprofile_response(base_response) :
def __init__(self, length=1) :
self.dbdbprofile = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.dbdbprofile = [dbdbprofile() for _ in range(length)]
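
# Minimal usage sketch (assumes a reachable NetScaler and valid credentials;
# the nitro_service import path mirrors the SDK imports used at the top):
# from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
# client = nitro_service("10.0.0.1", "http")
# client.login("nsroot", "password")
# all_profiles = dbdbprofile.get(client)    # fetch every configured DB profile
# n_profiles = dbdbprofile.count(client)    # or just count them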
| [
"[email protected]"
] | |
a37487f3fd56ae30f901da031243cd773ef35b7d | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq3223.py | e080a241bb5691ff2e6ed02cf7ee28f88d4b9702 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,319 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=45
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.Y.on(input_qubit[3])) # number=36
c.append(cirq.H.on(input_qubit[3])) # number=16
c.append(cirq.CZ.on(input_qubit[1],input_qubit[3])) # number=17
c.append(cirq.H.on(input_qubit[3])) # number=18
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=37
c.append(cirq.Z.on(input_qubit[1])) # number=35
c.append(cirq.Y.on(input_qubit[3])) # number=38
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[3])) # number=32
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=33
c.append(cirq.H.on(input_qubit[3])) # number=34
c.append(cirq.H.on(input_qubit[3])) # number=26
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=27
c.append(cirq.H.on(input_qubit[3])) # number=28
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=42
c.append(cirq.X.on(input_qubit[3])) # number=43
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=44
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=25
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=12
c.append(cirq.H.on(input_qubit[2])) # number=29
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=30
c.append(cirq.H.on(input_qubit[2])) # number=31
c.append(cirq.X.on(input_qubit[2])) # number=21
c.append(cirq.H.on(input_qubit[2])) # number=39
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=40
c.append(cirq.H.on(input_qubit[2])) # number=41
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=13
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=14
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq3223.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
    print(len(circuit), file=writefile)  # idiomatic form of circuit.__len__()
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
] | |
90b6785c7f5223d1dc216cddaf8db8fd5e3b3c37 | f2bec1dbb86b218fc1b7c9106ff13c15dea8c301 | /Interactive Tower of Hanoi/column/column.py | bd3deef5fada044b704a88cf513f425691695c0d | [] | no_license | PuffyShoggoth/hatch | 59e0f3684f041846084316f5bfafda1601cf5d2e | e1b32787cb0571469cd06a469b24890e23b78a58 | refs/heads/master | 2021-01-01T16:52:57.758161 | 2017-07-28T18:47:47 | 2017-07-28T18:47:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py | class Column:
def __init__(self, position, disks, C):
self.position = position
self.disks = disks
self.ids = []
for k in range(len(self.disks)):
self.ids.append(self.display_disk(self.disks[k], C, k))
    def can_add(self, column):
        # True when the top disk of `column` may move onto this column:
        # either this column is empty, or its top disk is the larger one.
        return len(self.disks) == 0 or (len(column.disks) > 0 and self.disks[-1] > column.disks[-1])
def add(self, disk, C):
k = len(self.disks)
self.ids.append(self.display_disk(disk, C, k))
self.disks.append(disk)
def remove(self, C):
C.delete(self.ids.pop())
return self.disks.pop()
def clear_all(self, C):
while len(self.ids) > 0:
C.delete(self.ids.pop())
self.disks.pop()
def display_disk(self, disk, C, k, currfill=""):
sz = disk.size/2
# height = C.winfo_height()
# height = 300
        return C.create_rectangle(self.position-sz, 600-k*30, self.position+sz, 600-(k+1)*30, fill=currfill)  # hard-coded y-offsets assume a canvas roughly 630px tall or more
#def display(self, C):
#C.create_rectangle(0, 200, 200, 0, fill = "black")
# sz = self.disks[k]/2
# height = C.winfo_height()
# C.create_rectangle(self.position-sz, height-k*10, self.position+sz, height-(k+1)*10, fill="black")
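
# Minimal usage sketch (hypothetical: assumes a Tkinter canvas and disk
# objects that expose a .size attribute and compare by size):
# import Tkinter as tk   # `import tkinter` on Python 3
# root = tk.Tk()
# canvas = tk.Canvas(root, width=800, height=650)
# canvas.pack()
# src = Column(200, [big_disk, mid_disk, small_disk], canvas)  # bottom to top
# dst = Column(500, [], canvas)
# if dst.can_add(src):
#     dst.add(src.remove(canvas), canvas)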
| [
"[email protected]"
] | |
8e839a17907ffc5edd0047756692fc187be920e7 | 1635e722e7ede72f4877671f36bbbc4199abae81 | /revised-addons/mrp_product_cost_calculation/mrp_production.py | 7f27de0d02cc1b22907e862f281f35ad313647a3 | [] | no_license | ecosoft-odoo/sqp | 7c09617048091ac6de4b25a33ad88127d36de452 | 7a7fc6b88087d98d536dd4ec39f9fb572918090e | refs/heads/master | 2023-08-08T00:07:48.405000 | 2023-08-04T15:47:43 | 2023-08-04T15:47:43 | 40,047,976 | 3 | 9 | null | 2023-08-02T08:38:53 | 2015-08-01T13:48:54 | Python | UTF-8 | Python | false | false | 4,254 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Mentis d.o.o.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_production(osv.osv):
_inherit = 'mrp.production'
def action_production_end(self, cr, uid, ids, context=None):
write_res = super(mrp_production, self).action_production_end(cr, uid, ids, context)
if write_res:
_production_ids = self.pool.get('mrp.production').browse(cr, uid, ids, context=None)
for _production_id in _production_ids:
_name = _production_id.name
_product_id = _production_id.product_id.id
product_obj=self.pool.get('product.product')
accounts = product_obj.get_product_accounts(cr, uid, _product_id, context)
if _production_id.product_id.cost_method == 'average' and accounts['stock_account_input'] and accounts['property_stock_valuation_account_id']:
_debit= 0.00
_credit = 0.00
_move_line_ids = self.pool.get('account.move.line').search(cr, uid, [('name','=',_name),
('product_id','!=',_product_id)])
_move_lines = self.pool.get('account.move.line').browse(cr, uid, _move_line_ids, context=None)
for _move_line in _move_lines:
_debit += _move_line.debit
_credit += _move_line.credit
_move_line_ids = self.pool.get('account.move.line').search(cr, uid, [('name','=',_name),
('product_id','=',_product_id)], order='id')
_move_lines = self.pool.get('account.move.line').browse(cr, uid, _move_line_ids, context=None)
for _move_line in _move_lines:
if _move_line.account_id.id == accounts['stock_account_input']:
_move_line.write({'credit': _credit}, context)
elif _move_line.account_id.id == accounts['property_stock_valuation_account_id']:
_move_line.write({'debit': _debit}, context)
if _debit and _debit != 0.00:
_old_inventory_qty = _production_id.product_id.qty_available or 0.00
_old_inventory_value = _old_inventory_qty * _production_id.product_id.standard_price
_new_inventory_value = _production_id.product_qty * _debit
_new_inventory_qty = _old_inventory_qty + _production_id.product_qty
if _new_inventory_qty and _new_inventory_qty != 0.00:
_new_standard_price = (_old_inventory_value + _new_inventory_value) / _new_inventory_qty
                        elif _production_id.product_qty and _production_id.product_qty != 0.00:
_new_standard_price = _debit / _production_id.product_qty
else:
_new_standard_price = _debit
product_obj.write(cr, uid, [_product_id], {'standard_price': _new_standard_price}, context)
return write_res
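
# Worked example of the moving-average branch above (illustrative numbers):
# 10 units on hand at standard_price 5.00 -> old inventory value 50.00;
# producing 5 more units with _debit 6.00 gives _new_inventory_value
# 5 * 6.00 = 30.00, so the updated standard price is
# (50.00 + 30.00) / (10 + 5) = 5.3333.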
mrp_production() | [
"[email protected]"
] | |
51a1fe32d632ed84c07b28f8a561d7fbd57759b0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/85/usersdata/224/52688/submittedfiles/funcoes1.py | 210a3b6367327c545086dc3ad9ee60b36b4fb544 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | # -*- coding: utf-8 -*-
def cres(Ca):
    # strictly increasing check; stop at len(Ca)-1 so Ca[i+1] never goes out of range
    cont = 0
    for i in range(0, len(Ca) - 1, 1):
        if Ca[i] < Ca[i+1]:
            cont = cont + 1
    if cont == len(Ca) - 1:
        return True
    else:
        return False
def decre(lista):
    # strictly decreasing check
    cont = 0
    for i in range(0, len(lista) - 1, 1):
        if lista[i] > lista[i+1]:
            cont = cont + 1
    if cont == len(lista) - 1:
        return True
    else:
        return False
def consecutivo(b):
    # consecutive check: each element equals the previous one plus 1
    cont = 0
    for i in range(0, len(b) - 1, 1):
        if b[i+1] - b[i] == 1:
            cont = cont + 1
    if cont == len(b) - 1:
        return True
    else:
        return False
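# e.g. cres([1, 2, 3]) -> True, decre([3, 2, 1]) -> True, consecutivo([4, 5, 6]) -> True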
n=int(input('Enter the list size: '))
g=[]
for i in range(1,n+1,1):
    x=int(input('Enter a number: '))
    g.append(x)
c=[]
for i in range(1,n+1,1):
    x=int(input('Enter a number: '))
    c.append(x)
w=[]
for i in range(1,n+1,1):
    x=int(input('Enter a number: '))
    w.append(x)
if cres(g):
print('S')
else:
print('N')
if decre(g):
    print('S')
else:
print('N')
if consecutivo(g):
print('S')
else:
print('N')
if cres(c):
print('S')
else:
print('N')
if decre(c):
    print('S')
else:
print('N')
if consecutivo(c):
print('S')
else:
print('N')
if cres(w):
print('S')
else:
print('N')
if decre(w):
    print('S')
else:
print('N')
if consecutivo(w):
print('S')
else:
print('N') | [
"[email protected]"
] | |
9c7d441f980e1aaef94cc526e2ba03edbd3154fe | 6a0d42149f8bbe5f7d6cb8103fe557d0d048c832 | /carts/views.py | 822a318160da7420c9dc5c3bc34769e0481eb52e | [] | no_license | syfqpipe/product-public | 8f3b2f81d0c9fdc61bb5841db1d4d9d26bb618a1 | 62b918bd9f24b4a47fab04398ca8112268e1e2b1 | refs/heads/master | 2023-01-22T05:14:19.132567 | 2020-12-05T03:22:56 | 2020-12-05T03:22:56 | 318,689,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,799 | py | import hashlib
import json
import datetime
from django.http import JsonResponse
from django.shortcuts import render,redirect
from django.db.models import Q
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework import viewsets, status
from rest_framework_extensions.mixins import NestedViewSetMixin
from django_filters.rest_framework import DjangoFilterBackend
from entities.models import Entity
from products.models import Product, ProductSearchCriteria
from services.models import Service, ServiceRequest
from quotas.models import Quota
from .models import (
Cart,
CartItem
)
from .serializers import (
CartSerializer,
CartExtendedSerializer,
CartItemSerializer
)
from transactions.models import Transaction
from users.models import CustomUser
class CartViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
queryset = Cart.objects.all()
serializer_class = CartSerializer
filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
filterset_fields = ['user', 'cart_status']
def get_permissions(self):
if self.action == 'list':
permission_classes = [AllowAny]
else:
permission_classes = [AllowAny]
return [permission() for permission in permission_classes]
def get_queryset(self):
queryset = Cart.objects.all()
return queryset
@action(methods=['POST'], detail=False)
def check_cart(self, request, *args, **kwargs):
request_ = json.loads(request.body)
request_user_id_ = request_['user']
request_user_ = CustomUser.objects.filter(
id=request_user_id_
).first()
cart_ = Cart.objects.filter(
user=request_user_id_,
cart_status='CR'
).first()
print('hello', cart_)
if cart_:
            print('cart exists')  # translated from the Malay debug text "ada"
serializer = CartExtendedSerializer(cart_)
else:
            print('no open cart found')  # translated from the Malay debug text "xde"
new_cart_ = Cart.objects.create(
user=request_user_
)
serializer = CartExtendedSerializer(new_cart_)
return Response(serializer.data)
@action(methods=['GET'], detail=True)
def with_item(self, request, *args, **kwargs):
cart = self.get_object()
serializer = CartExtendedSerializer(cart)
return Response(serializer.data)
@action(methods=['POST'], detail=True)
def add_item_to_cart(self, request, *args, **kwargs):
cart_item_request = json.loads(request.body)
# print('cit', cart_item_request)
# Post.objects.filter(user=request.user)
# product_length = CartItem.objects.filter(cart_item_type = 'PR').count()
# print("{0:0>6}".format(product_length))
# Item product
if cart_item_request['item_type'] == 'product':
entity_id = cart_item_request['entity']
product_id = cart_item_request['product']
image_version_id = cart_item_request['image_version_id']
image_form_type = cart_item_request['image_form_type']
year1 = cart_item_request['year1']
year2 = cart_item_request['year2']
cart = self.get_object()
entity = Entity.objects.filter(id=entity_id).first()
product = Product.objects.filter(id=product_id).first()
cart_items = CartItem.objects.filter(cart=cart.id)
print(cart_items)
# Document and image
if image_version_id:
new_cart_item = CartItem.objects.create(
entity=entity,
product=product,
image_form_type=image_form_type,
image_version_id=image_version_id,
cart=cart,
cart_item_type='PR'
)
# if aaa is None:
# user_id_ = cart_item_request['user']
# delta = datetime.timedelta(hours=24)
# current_time = datetime.datetime.now(tz=timezone.utc)
# date_filter = current_time - delta
# transactions_ = Transaction.objects.filter(
# created_date__gte=date_filter,
# user=user_id_,
# ).all()
# if transactions_:
# product_viewing_fee = Product.objects.filter(slug='document_form_viewing_fee').first()
# new_cart_item_viewing_fee = CartItem.object.create(
# product=product_viewing_fee,
# cart=cart,
# cart_item_type='SE'
# )
# Financial historical
elif year1 and year2:
new_cart_item = CartItem.objects.create(
entity=entity,
product=product,
year1=year1,
year2=year2,
cart= cart,
cart_item_type='PR'
)
# Products
else:
new_cart_item = CartItem.objects.create(
entity=entity,
product=product,
cart= cart,
cart_item_type='PR'
)
# Item service
elif cart_item_request['item_type'] == 'service':
service_request_id = str(cart_item_request['service_request_id'])
service_request = ServiceRequest.objects.filter(id=service_request_id).first()
cart = self.get_object()
new_cart_item = CartItem.objects.create(
service_request=service_request,
cart= cart,
cart_item_type='SE'
)
# Item quota
elif cart_item_request['item_type'] == 'quota':
quota_id = str(cart_item_request['quota_id'])
quota = Quota.objects.filter(id=quota_id).first()
cart = self.get_object()
new_cart_item = CartItem.objects.create(
quota = quota,
cart= cart,
cart_item_type='QU'
)
elif cart_item_request['item_type'] == 'product_search_criteria':
product_search_criteria_id = str(cart_item_request['product_search_criteria_id'])
product_search_criteria = ProductSearchCriteria.objects.filter(id=product_search_criteria_id).first()
cart = self.get_object()
new_cart_item = CartItem.objects.create(
product_search_criteria=product_search_criteria,
cart= cart,
cart_item_type='PS'
)
else:
pass
serializer = CartExtendedSerializer(cart)
return Response(serializer.data)
@action(methods=['POST'], detail=True)
    def add_item_to_cart_bulk(self, request, *args, **kwargs):
        # Placeholder: bulk add is not implemented yet; for now it accepts the
        # payload, skips every item, and returns the unchanged cart.
        cart_item_request_ = json.loads(request.body)
        for item in cart_item_request_:
            # TODO: route each item through the same logic as add_item_to_cart
            pass
        cart = self.get_object()
        serializer = CartExtendedSerializer(cart)
        return Response(serializer.data)
@action(methods=['POST'], detail=True)
def remove_item_from_cart(self, request, *args, **kwargs):
cart_item_id = json.loads(request.body)['cart_item_id']
cart_item = CartItem.objects.filter(id=cart_item_id).first()
cart = self.get_object()
cart.cart_item.remove(cart_item)
cart.save()
serializer = CartExtendedSerializer(cart)
return Response(serializer.data)
| [
"[email protected]"
] | |
446ef7b61a2cbc8d7ca4a6da2a5caf36c3e620a7 | d4fac8ce52d8a058bb12fda402b9d25e24271ae6 | /examples/twisted/websocket/echo_site_tls/server.py | a5a43b7321a46fdf3dc8da70287bd241a662bbc8 | [
"MIT"
] | permissive | workingwim/AutobahnPython | 392ce2f11c320fe4b36bc0eefed1971418575394 | 3fce8aca718335db99aba7adbd4426c8a81cb0e0 | refs/heads/master | 2020-12-25T04:27:52.868814 | 2015-08-09T15:12:40 | 2015-08-09T15:12:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,782 | py | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
from twisted.internet import reactor, ssl
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource, \
HTTPChannelHixie76Aware
class EchoServerProtocol(WebSocketServerProtocol):
def onMessage(self, payload, isBinary):
self.sendMessage(payload, isBinary)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
contextFactory = ssl.DefaultOpenSSLContextFactory('keys/server.key',
'keys/server.crt')
factory = WebSocketServerFactory("wss://localhost:8080",
debug=debug,
debugCodePaths=debug)
factory.protocol = EchoServerProtocol
factory.setProtocolOptions(allowHixie76=True) # needed if Hixie76 is to be supported
resource = WebSocketResource(factory)
    # we serve static files under "/" ..
root = File(".")
# and our WebSocket server under "/ws"
root.putChild("ws", resource)
# both under one Twisted Web Site
site = Site(root)
site.protocol = HTTPChannelHixie76Aware # needed if Hixie76 is to be supported
reactor.listenSSL(8080, site, contextFactory)
reactor.run()
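
# Quick manual test sketch (assumes a self-signed pair generated into the
# keys/server.key and keys/server.crt paths used above): run the script,
# accept the certificate at https://localhost:8080/, then open a browser
# WebSocket to wss://localhost:8080/ws and watch payloads echo back.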
| [
"[email protected]"
] | |
fe977e99b30b1b067f56662d7f8d44e7183e60c7 | ee838ab1d552cd080ef02256e7049d4965c1eff1 | /backend/accessbahamas_20535/urls.py | f31c15ab8873d6011ec3551ab4e7fe58c25b347b | [] | no_license | crowdbotics-apps/accessbahamas-20535 | 8f365322474ab9c315a01face305b7f0f8c6c1b2 | 87721082fda4e79c8aa2b580ddadd6967c2cad32 | refs/heads/master | 2022-12-23T19:03:01.061507 | 2020-09-21T22:11:00 | 2020-09-21T22:11:00 | 297,465,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,087 | py | """accessbahamas_20535 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("api/v1/", include("course.api.v1.urls")),
path("course/", include("course.urls")),
path("home/", include("home.urls")),
]
admin.site.site_header = "AccessBahamas"
admin.site.site_title = "AccessBahamas Admin Portal"
admin.site.index_title = "AccessBahamas Admin"
# swagger
api_info = openapi.Info(
title="AccessBahamas API",
default_version="v1",
description="API documentation for AccessBahamas App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
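
# Note: with permission_classes=(permissions.IsAuthenticated,) above, the
# generated Swagger UI at /api-docs/ is only served to authenticated users.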
| [
"[email protected]"
] | |
8b14a5f5b37c67b4d3fa56854765efd86eaa81b9 | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/ashvin/demos/legacy_pusher/bc1.py | b253935fcf875c8d3cd2a4123dc8b9009f53c566 | [
"MIT"
] | permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,529 | py | """Example behavior cloning script for pointmass.
If you are trying to run this code, ask Ashvin for the demonstration file:
demos/pusher_demos_100.npy (which should go in your S3 storage)
"""
import rlkit.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v2
from multiworld.envs.pygame.point2d import Point2DWallEnv
from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_multienv import SawyerPushAndReachXYEasyEnv
from rlkit.launchers.launcher_util import run_experiment
from rlkit.launchers.arglauncher import run_variants
import numpy as np
def her_td3_experiment(variant):
import gym
import multiworld.envs.mujoco
import multiworld.envs.pygame
import rlkit.samplers.rollout_functions as rf
import rlkit.torch.pytorch_util as ptu
from rlkit.exploration_strategies.base import (
PolicyWrappedWithExplorationStrategy
)
from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy
from rlkit.exploration_strategies.gaussian_strategy import GaussianStrategy
from rlkit.exploration_strategies.ou_strategy import OUStrategy
from rlkit.torch.grill.launcher import get_video_save_func
from rlkit.demos.her_bc import HerBC
from rlkit.torch.networks import ConcatMlp, TanhMlpPolicy
from rlkit.data_management.obs_dict_replay_buffer import (
ObsDictRelabelingBuffer
)
if 'env_id' in variant:
env = gym.make(variant['env_id'])
else:
env = variant['env_class'](**variant['env_kwargs'])
observation_key = variant['observation_key']
desired_goal_key = variant['desired_goal_key']
variant['algo_kwargs']['her_kwargs']['observation_key'] = observation_key
variant['algo_kwargs']['her_kwargs']['desired_goal_key'] = desired_goal_key
if variant.get('normalize', False):
raise NotImplementedError()
achieved_goal_key = desired_goal_key.replace("desired", "achieved")
replay_buffer = ObsDictRelabelingBuffer(
env=env,
observation_key=observation_key,
desired_goal_key=desired_goal_key,
achieved_goal_key=achieved_goal_key,
**variant['replay_buffer_kwargs']
)
obs_dim = env.observation_space.spaces['observation'].low.size
action_dim = env.action_space.low.size
goal_dim = env.observation_space.spaces['desired_goal'].low.size
exploration_type = variant['exploration_type']
if exploration_type == 'ou':
es = OUStrategy(
action_space=env.action_space,
**variant['es_kwargs']
)
elif exploration_type == 'gaussian':
es = GaussianStrategy(
action_space=env.action_space,
**variant['es_kwargs'],
)
elif exploration_type == 'epsilon':
es = EpsilonGreedy(
action_space=env.action_space,
**variant['es_kwargs'],
)
else:
raise Exception("Invalid type: " + exploration_type)
qf1 = ConcatMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
qf2 = ConcatMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
policy = TanhMlpPolicy(
input_size=obs_dim + goal_dim,
output_size=action_dim,
**variant['policy_kwargs']
)
exploration_policy = PolicyWrappedWithExplorationStrategy(
exploration_strategy=es,
policy=policy,
)
algorithm = HerBC(
env,
exploration_policy,
policy,
variant["demo_path"],
replay_buffer=replay_buffer,
**variant['algo_kwargs']
)
if variant.get("save_video", False):
rollout_function = rf.create_rollout_function(
rf.multitask_rollout,
max_path_length=algorithm.max_path_length,
observation_key=algorithm.observation_key,
desired_goal_key=algorithm.desired_goal_key,
)
video_func = get_video_save_func(
rollout_function,
env,
policy,
variant,
)
algorithm.post_epoch_funcs.append(video_func)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
# noinspection PyTypeChecker
size = 0.1
low = (-size, 0.4 - size, 0)
high = (size, 0.4 + size, 0.1)
variant = dict(
algo_kwargs=dict(
base_kwargs=dict(
num_epochs=1001,
num_steps_per_epoch=1,
num_steps_per_eval=1000,
max_path_length=100,
num_updates_per_env_step=1,
batch_size=128,
discount=0.99,
min_num_steps_before_training=0,
reward_scale=100,
render=False,
collection_mode='online',
parallel_env_params=dict(
num_workers=1,
),
),
her_kwargs=dict(
observation_key='state_observation',
desired_goal_key='state_desired_goal',
),
td3_kwargs=dict(
weight_decay=0.0,
),
),
replay_buffer_kwargs=dict(
max_size=int(1E6),
fraction_goals_are_rollout_goals=1.0,
fraction_resampled_goals_are_env_goals=0.0,
ob_keys_to_save=['state_observation', 'state_desired_goal'],
),
qf_kwargs=dict(
hidden_sizes=[64, 64],
),
policy_kwargs=dict(
hidden_sizes=[64, 64],
),
algorithm='HER-TD3',
version='normal',
es_kwargs=dict(
max_sigma=.8,
),
exploration_type='ou',
observation_key='state_observation',
desired_goal_key='state_desired_goal',
init_camera=sawyer_pusher_camera_upright_v2,
do_state_exp=True,
save_video=False,
imsize=84,
snapshot_mode='gap_and_last',
snapshot_gap=50,
env_class=SawyerPushAndReachXYEasyEnv,
env_kwargs=dict(
hide_goal=True,
reward_info=dict(
type="state_distance",
),
),
demo_path="demos/pusher_demos_100.npy",
num_exps_per_instance=1,
)
search_space = {
# 'env_id': ['SawyerPushAndReacherXYEnv-v0', ],
'seedid': range(3),
'algo_kwargs.base_kwargs.num_updates_per_env_step': [1, ],
'replay_buffer_kwargs.fraction_goals_are_rollout_goals': [0.0, 0.5, 1.0],
'algo_kwargs.td3_kwargs.weight_decay': [0.0, 1e-3, 1e-4, 1e-5],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
# n_seeds = 1
# mode = 'local'
# exp_prefix = 'test'
n_seeds = 1
mode = 'ec2'
exp_prefix = 'sawyer_pusher_state_final'
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(her_td3_experiment, variants, run_id=1)
# for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
# for i in range(n_seeds):
# run_experiment(
# her_td3_experiment,
# exp_prefix=exp_prefix,
# mode=mode,
# snapshot_mode='gap_and_last',
# snapshot_gap=50,
# variant=variant,
# use_gpu=True,
# num_exps_per_instance=5,
# )
| [
"[email protected]"
] | |
dfc4d96c5e1e422d9a810b84251872ff50ca2b40 | cae22103ac9b5d3aa90a6ee48e9e6694474fe64f | /recipes.py | e994a76f42ccd32316d88a12329c623614cb7446 | [
"MIT"
] | permissive | lennykioko/Yummy_recipes_2 | 22cf7f47971bc0197bf371696939c2c1a5dcb54b | 45e75612eff9cf5190014b4b3a7b6366d1479c7f | refs/heads/master | 2021-09-09T02:07:02.043545 | 2018-03-13T09:38:00 | 2018-03-13T09:38:00 | 114,108,725 | 1 | 0 | MIT | 2018-01-23T11:48:35 | 2017-12-13T10:44:42 | HTML | UTF-8 | Python | false | false | 1,379 | py | """Handle data on recipes
Created: 2018
Author: Lenny
"""
all_recipes = {}
class Recipe(object):
"""Contain recipe creation, update and delete methods"""
def create(self, title='', category='', description=''):
"""create a new recipe"""
global all_recipes
if title != '' and category != '' and description != '':
if title not in all_recipes:
all_recipes[title] = [title, category, description]
return "Recipe created succesfully"
return "Title already exists"
return "Please fill in all fields"
def update(self, title='', category='', description=''):
"""update an existing recipe"""
global all_recipes
if title != '' and category != '' and description != '':
if title in all_recipes:
all_recipes[title] = [title, category, description]
return "Sucessfully updated"
return "Recipe does not exist"
return "Please fill in all fields"
def delete(self, title=''):
"""delete an existing recipe"""
global all_recipes
if title != '':
try:
del all_recipes[title]
return "Successfully deleted"
except KeyError:
return "Recipe does not exist"
return "Please fill in all fields"
| [
"[email protected]"
] | |
62879a3a29b2b619eb653cd7fb95819aca929e32 | 4adc1d1b8f9badefcd8c25c6e0e87c6545ccde2c | /OrcLib/LibProcess.py | 1b49001ddde66a2937ebcb8166f2bdfb5335b08d | [] | no_license | orange21cn/OrcTestToolsKit | eb7b67e87a608fb52d7bdcb2b859fa588263c136 | 69b6a3c382a7043872db1282df4be9e413d297d6 | refs/heads/master | 2020-04-15T07:30:35.485214 | 2017-09-30T06:16:17 | 2017-09-30T06:16:17 | 68,078,991 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,107 | py | # coding=utf-8
from OrcLib.LibLog import OrcLog
from OrcLib.LibNet import OrcResource
from OrcLib.LibNet import ResourceCheck
_logger = OrcLog('basic.process')
def get_mark(p_flag, p_id):
"""
获取标识
:param p_flag:
:param p_id:
:return:
"""
if "BATCH" == p_flag:
return get_batch_mark(p_id)
elif "CASE" == p_flag:
return get_case_mark(p_id)
elif "STEP" == p_flag:
return get_step_mark(p_id)
elif "ITEM" == p_flag:
return get_item_mark(p_id)
elif "PAGE_DEF" == p_flag:
return get_page_def_mark(p_id)
elif "PAGE_DET" == p_flag:
return get_page_det_mark(p_id)
elif "WIDGET" == p_flag:
return get_widget_mark(p_id)
elif 'DATA' == p_flag:
return get_data_mark(p_id)
else:
return None
def get_batch_mark(p_batch_id):
"""
获取计划标识
:param p_batch_id:
:return:
"""
resource_batch_def = OrcResource('BatchDef')
# 获取计划信息
batch_def_info = resource_batch_def.get(path=p_batch_id)
if not ResourceCheck.result_status(batch_def_info, u'查询计划数据', _logger):
return None
if not batch_def_info.data:
return None
return batch_def_info.data['batch_no']
def get_case_mark(p_case_id):
"""
获取用例显示标识
:param p_case_id:
:return:
"""
resource_case_def = OrcResource('CaseDef')
# 获取用例信息
case_def_info = resource_case_def.get(path=p_case_id)
if not ResourceCheck.result_status(case_def_info, u'查询用例数据', _logger):
return None
if not case_def_info.data:
return None
return case_def_info.data['case_path']
def get_step_mark(p_step_id):
"""
获取步骤显示标识
:param p_step_id:
:return:
"""
resource_case_det = OrcResource('CaseDet')
# 获取用例步骤数据
case_det_info = resource_case_det.get(parameter=dict(step_id=p_step_id))
if not ResourceCheck.result_status(case_det_info, u'查询用例步骤数据', _logger):
return None
if not case_det_info.data:
return None
    # Fetch the parent case mark
case_mark = get_case_mark(case_det_info.data[0]['case_id'])
if case_mark is None:
return None
return "%s:%s" % (case_mark, case_det_info.data[0]['step_no'])
def get_item_mark(p_item_id):
"""
获取执行项显示标识
:param p_item_id:
:return:
"""
resource_step_det = OrcResource('StepDet')
# 获取步骤步骤项数据
step_det_info = resource_step_det.get(parameter=dict(item_id=p_item_id))
if not ResourceCheck.result_status(step_det_info, u'查询步骤步骤项数据', _logger):
return None
if not step_det_info.data:
return None
    # Fetch the parent step mark
step_mark = get_step_mark(step_det_info.data[0]['step_id'])
if step_mark is None:
return None
return "%s:%s" % (step_mark, step_det_info.data[0]['item_no'])
def get_page_def_mark(p_page_def_id):
"""
获取页面显示标识
:param p_page_def_id:
:return:
"""
resource_page_def = OrcResource('PageDef')
# 获取 page_def 信息
page_def_info = resource_page_def.get(path=p_page_def_id)
if not ResourceCheck.result_status(page_def_info, u'查询页面数据', _logger):
return None
if not page_def_info.data:
return None
return page_def_info.data['page_flag']
def get_page_det_mark(p_page_det_id):
"""
获取环境.页面显示标识
:param p_page_det_id:
:return:
"""
resource_page_det = OrcResource('PageDet')
resource_dict = OrcResource('Dict')
# 查询环境页面信息
page_det_info = resource_page_det.get(path=p_page_det_id)
if not ResourceCheck.result_status(page_det_info, u'查询环境页面信息', _logger):
return None
if not page_det_info.data:
return None
    # Fetch the page mark (get_page_def_mark returns the mark string or None)
    page_def_info = get_page_def_mark(page_det_info.data['page_id'])
    if page_def_info is None:
        return None
    # Fetch the environment dictionary entry
    env_info = resource_dict.get(parameter=dict(dict_flag='test_env', dict_value=page_det_info.data['page_env']))
    if not env_info or not env_info.data:
        return None
return "%s:%s" % (env_info.data[0]['dict_text'], page_def_info)
def get_widget_mark(p_widget_id):
"""
获取控件显示标识
:param p_widget_id:
:return:
"""
resource_widget_def = OrcResource("WidgetDef")
# 查询控件信息
widget_def_info = resource_widget_def.get(path=p_widget_id)
if not ResourceCheck.result_status(widget_def_info, u'查询控件信息', _logger):
return None
if not widget_def_info.data:
return None
print "-=-=-=", widget_def_info.data
return widget_def_info.data['widget_path']
def get_data_mark(p_id):
"""
获取数据标识
:param p_id:
:return:
"""
return p_id
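# Example of the dispatch (assumed services running; return values illustrative):
#   get_mark("CASE", case_id)  ->  u'suite/case01'    (the case path)
#   get_mark("STEP", step_id)  ->  u'suite/case01:3'  (case path + step number)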
| [
"[email protected]"
] | |
99b05296f13f43de6bc7287bc0cbef34834095d2 | 085f74ad9dcaf192914cf191694f60201d1c271e | /mindinsight/datavisual/data_transform/loader_generators/loader_generator.py | 44e7210efb31379a38366eb7976f163dfe774dfd | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | fapbatista/mindinsight | 8aeb48c739c505de1a2b32c694b6310a4b7d4e85 | db5769eb80cbd13a2a9af7682c11f5667d8bf141 | refs/heads/master | 2023-02-08T17:25:03.950321 | 2020-12-29T12:38:28 | 2020-12-29T12:38:28 | 325,083,601 | 0 | 0 | Apache-2.0 | 2020-12-29T12:38:30 | 2020-12-28T18:13:10 | Python | UTF-8 | Python | false | false | 1,710 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base loader generator."""
from abc import abstractmethod
MAX_DATA_LOADER_SIZE = 15
class LoaderGenerator:
"""Base loader generator for loader generators."""
@abstractmethod
def generate_loaders(self, loader_pool):
"""
Abstract method for generating loaders.
Args:
loader_pool (dict[str, LoaderStruct]): Current loader pool in data_manager.
Returns:
dict[str, LoaderStruct], a dict of `Loader`.
"""
@abstractmethod
def check_train_job_exist(self, train_id):
"""
Abstract method for checking if train job exists.
Args:
train_id (str): Train ID.
Returns:
bool, if train job exists, return True.
"""
@abstractmethod
def generate_loader_by_train_id(self, train_id):
"""
Abstract method for generating loader by train id.
Args:
train_id (str): Train ID.
Returns:
dict[str, LoaderStruct], a dict of `Loader`.
"""
| [
"[email protected]"
] | |
35c81825c15777a71b5467275e3808b8693f61de | 03ef0d8f5cc5fcaffff188e0f8fcb76fa1986b53 | /inxwords.py | cffc4010b7366c56e9cef92febb68f13eafd234e | [] | no_license | macressler/inXwords | f858b93ad6ae323485f48e47352e7dcb0437622e | 28b68cacc84ac3d6d4dc736843fd2c0058c0397a | refs/heads/master | 2020-12-03T10:24:43.953646 | 2015-09-14T14:37:32 | 2015-09-14T14:37:32 | 44,265,115 | 1 | 0 | null | 2015-10-14T17:37:30 | 2015-10-14T17:37:29 | null | UTF-8 | Python | false | false | 9,081 | py | #!/usr/bin/env python
# encoding: utf-8
"""
Find a trending topic along the lines of #YinXwords and
tweet a random X-word sentence from Project Gutenberg.
"""
from __future__ import print_function, unicode_literals
try:
import resource
mem0 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/(1024*1024.0)
except ImportError:
# resource not on Windows
pass
import argparse
import os
import random
import re
import sys
import time
import twitter
import webbrowser
import yaml
from pprint import pprint
try:
# http://stackoverflow.com/a/2282656/724176
from timeout import timeout, TimeoutError
except (AttributeError, ImportError) as e:
# Not on Windows or module not present
timeout = None
TimeoutError = None
REGEX = re.compile("[Ii]n([0-9]+|[Tt]hree|[Ff]our|[Ff]ive|[Ss]ix)[Ww]ords$")
# Dict of number of words in a sentence->all those sentences
BIG_OLD_CACHE = {}
WOE_IDS = {
"World": 1,
"Australia": 23424748,
"Canada": 23424775,
"UK": 23424975,
"US": 23424977,
}
TWITTER = None
SEPERATORS = [" ", " ", " ", " ", "\n", "\n", "\n\n"]
# To avoid rate limits
get_trends_place_timestamp = 0
# cmd.exe cannot do Unicode so encode first
def print_it(text):
print(text.encode('utf-8'))
def load_yaml(filename):
"""
File should contain:
consumer_key: TODO_ENTER_YOURS
consumer_secret: TODO_ENTER_YOURS
access_token: TODO_ENTER_YOURS
access_token_secret: TODO_ENTER_YOURS
"""
    with open(filename) as f:
        data = yaml.safe_load(f)
if not data.viewkeys() >= {
'access_token', 'access_token_secret',
'consumer_key', 'consumer_secret'}:
sys.exit("Twitter credentials missing from YAML: " + filename)
return data
def get_twitter():
global TWITTER
if TWITTER is None:
data = load_yaml(args.yaml)
# Create and authorise an app with (read and) write access at:
# https://dev.twitter.com/apps/new
# Store credentials in YAML file
TWITTER = twitter.Twitter(auth=twitter.OAuth(
data['access_token'],
data['access_token_secret'],
data['consumer_key'],
data['consumer_secret']))
return TWITTER
def ends_with_in_x_words(text):
"""
If "inXwords" found at the end of the text, return the X.
Otherwise return 0.
"""
if not text:
return 0
    found = REGEX.findall(text)  # case handling is built into the pattern itself
if found:
found = found[0]
try:
return int(found)
except ValueError:
if found.lower() == "three":
return 3
elif found.lower() == "four":
return 4
elif found.lower() == "five":
return 5
elif found.lower() == "six":
return 6
return 0
def get_trending_topic_from_twitter():
global get_trends_place_timestamp
# "This information is cached for 5 minutes. Requesting more frequently
# than that will not return any more data, and will count against your
# rate limit usage."
if time.time() - get_trends_place_timestamp > 300:
print("Get fresh trend")
else:
print("Use cached trend")
return args.trend, ends_with_in_x_words(args.trend)
t = get_twitter()
# Returns the locations that Twitter has trending topic information for.
# world_locations = TWITTER.trends.available()
# pprint(world_locations)
# print("*"*80)
# Shuffle list of WOE_IDS, and go through each until a match is found
pprint(WOE_IDS)
woe_ids = WOE_IDS.items()
random.shuffle(woe_ids)
pprint(woe_ids)
for woe_id in woe_ids:
print(woe_id)
print("GET trends/place")
trends = t.trends.place(_id=woe_id[1])[0]
get_trends_place_timestamp = time.time()
for trend in trends['trends']:
print("-"*80)
pprint(trend)
print_it(trend['name'])
how_many_words = ends_with_in_x_words(trend['name'])
print(how_many_words)
if (not trend['promoted_content'] and
how_many_words >= 3 and
how_many_words <= 6):
args.trend = trend['name']
return trend['name'], how_many_words
print("No fresh trend found, use cached")
return args.trend, ends_with_in_x_words(args.trend)
def get_random_sentence_from_pg(number_of_words):
infile = os.path.join(args.sendir,
str(number_of_words) + "-word-sentences.txt")
print(number_of_words in BIG_OLD_CACHE)
if number_of_words not in BIG_OLD_CACHE:
with open(infile) as f:
BIG_OLD_CACHE[number_of_words] = f.read().splitlines()
return random.choice(BIG_OLD_CACHE[number_of_words])
def tweet_it(string, in_reply_to_status_id=None):
global TWITTER
if len(string) <= 0:
print("ERROR: trying to tweet an empty tweet!")
return
t = get_twitter()
print_it("TWEETING THIS: " + string)
if args.test:
print("(Test mode, not actually tweeting)")
else:
print("POST statuses/update")
result = t.statuses.update(
status=string,
in_reply_to_status_id=in_reply_to_status_id)
url = "http://twitter.com/" + \
result['user']['screen_name'] + "/status/" + result['id_str']
print("Tweeted: " + url)
if not args.no_web:
webbrowser.open(url, new=2) # 2 = open in a new tab, if possible
# @timeout(30)
def inxwords():
"""
Main stuff callable in loop
"""
print("Get a topic from Twitter")
trend, how_many_words = get_trending_topic_from_twitter()
pprint(trend)
print("How many words?", how_many_words)
if not trend:
print("Nowt found, try later")
return
# Find a matching sentence from PG
random_sentence = get_random_sentence_from_pg(how_many_words)
print(random_sentence)
# 1 in 4 chance to add quotes
if random.randint(0, 3) == 0:
random_sentence = '"' + random_sentence + '"'
# Random order of text and hashtag
things = [trend, random_sentence]
random.shuffle(things)
print(">"+" ".join(things)+"<")
# Random separator between text and hashtag
tweet = random.choice(SEPERATORS).join(things)
print(">"+tweet+"<")
print("Tweet this:\n", tweet)
try:
tweet_it(tweet)
except twitter.api.TwitterHTTPError as e:
print("*"*80)
print(e)
print("*"*80)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Find a trending topic along the lines of #YinXwords and "
"tweet a random X-word sentence from Project Gutenberg.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-y', '--yaml',
# default='/Users/hugo/Dropbox/bin/data/inxwords.yaml',
default='E:/Users/hugovk/Dropbox/bin/data/inxwords.yaml',
help="YAML file location containing Twitter keys and secrets")
parser.add_argument(
'-s', '--sendir',
# default='/Users/hugo/Dropbox/txt/gutenberg/',
default='E:/Users/hugovk/Dropbox/txt/gutenberg',
help="Directory of files containing sentences from Project Gutenberg")
parser.add_argument(
'-t', '--trend', default=None,
help="Default trend to use if none found")
parser.add_argument(
'-l', '--loop', action='store_true',
help="Run repeatedly with a delay")
parser.add_argument(
'-d', '--delay', type=int, default=15, metavar='minutes',
help="Delay between loops")
parser.add_argument(
'-nw', '--no-web', action='store_true',
help="Don't open a web browser to show the tweeted tweet")
parser.add_argument(
'-x', '--test', action='store_true',
help="Test mode: go through the motions but don't update anything")
args = parser.parse_args()
if args.loop:
while(True):
try:
inxwords()
print("Sleep for " + str(args.delay) + " minutes")
time.sleep(args.delay*60)
except TimeoutError as e:
print("*"*80)
print(e)
print("*"*80)
print("Sleep for " + str(args.delay*60-30) + " seconds")
time.sleep(args.delay*60-30)
except twitter.api.TwitterHTTPError as e:
# Rate limit? Try again soon.
print("*"*80)
print(e)
print("*"*80)
print("Sleep for just over a minute")
time.sleep(61)
else:
inxwords()
try:
mem1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/(1024*1024.0)
print(mem0)
print(mem1)
print(mem1-mem0)
except NameError:
# resource not on Windows
pass
# TODO call from a .sh/.bat looping as well, in case of exceptions
# End of file
| [
"[email protected]"
] | |
520660869fff6f627576c21a06b8c5e65de775b0 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/leap/acc42607206743369b303b27783bb589.py | ef6fcb7229e7a8455b6ddfa3d7f2e026020e8853 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 186 | py | #leap year exercise
def is_leap_year(year):
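    # Gregorian rule: every 4th year is a leap year, except centuries, unless
    # divisible by 400, e.g. 2000 -> True, 1900 -> False, 1996 -> True.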
if year % 400 == 0:
return True
elif year % 100 == 0:
return False
elif year % 4 == 0:
return True
else:
return False
| [
"[email protected]"
] | |
d52cdf7164481357e9cc6e6bcf6c8d7b3ad73a73 | 17265d8af472ffd4dc629145f85497ce89dc3139 | /JaPanda/asgi.py | 90a3f1cd809c42ed4e1c04758670d9cb3e361280 | [] | no_license | Tomo-zhiye/JaPanda | 88eb69329d4432b4dd5d028b006f632bafff136c | 0391140563b2c324738335b6b82018f08f596dfe | refs/heads/master | 2023-04-21T19:41:35.344682 | 2021-04-29T08:14:20 | 2021-04-29T08:14:20 | 362,740,629 | 0 | 1 | null | 2021-04-30T12:46:48 | 2021-04-29T08:12:57 | Python | UTF-8 | Python | false | false | 391 | py | """
ASGI config for JaPanda project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'JaPanda.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
e9b50774cd67f89ff82ec7dfb29f5273a1cbcc3d | 8e542d1217ba58c5e04866b9a34cc0c0306701bb | /backend/users/api/v1/urls.py | 5554d6d5775e2f23807cf3a0ef413a5c40d7ec57 | [] | no_license | crowdbotics-apps/mobile-11-aug-dev-8774 | 6e3d2d3faa372dbad3d18924f3c37640715a2184 | 37669c511daa89cb38804a125156531f9b798d84 | refs/heads/master | 2023-07-06T02:47:39.279077 | 2020-08-11T11:33:03 | 2020-08-11T11:33:03 | 286,648,832 | 0 | 0 | null | 2021-08-03T20:01:40 | 2020-08-11T04:53:05 | JavaScript | UTF-8 | Python | false | false | 254 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import HfkhfjfhkViewSet
router = DefaultRouter()
router.register("hfkhfjfhk", HfkhfjfhkViewSet)
urlpatterns = [
path("", include(router.urls)),
]
| [
"[email protected]"
] | |
05d05ed538bd5f49ce99838e53ce9c249b735d0d | 6bdad555fd073e8b1c73e373782249c364a0b5bd | /quotes/migrations/0017_auto_20170803_0027.py | 20fa87cd87f26592cfdceb32a2102aaf0d656be7 | [] | no_license | h4k1m0u/gistutorials | 35383f5e1197553c528bc75405094118815e72fd | 795da31428a469becb948deefe57c725116266be | refs/heads/master | 2023-01-05T20:38:39.552302 | 2019-04-29T01:03:34 | 2019-04-29T01:03:34 | 309,973,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-08-03 00:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quotes', '0016_auto_20170325_0158'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='user',
),
migrations.RemoveField(
model_name='quote',
name='member',
),
migrations.DeleteModel(
name='Member',
),
]
| [
"[email protected]"
] | |
import scipy
import matplotlib.pyplot as plt
import matplotlib.animation as animate
import matplotlib
import matplotlib.patches
matplotlib.use("Agg")
import sys
import itertools
import h5py
import json
import cPickle as pickle
from matplotlib.animation import FuncAnimation
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
import numpy as np
from mpltools import color
import odor_tracking_sim.swarm_models as swarm_models
import odor_tracking_sim.trap_models as trap_models
import odor_tracking_sim.wind_models as wind_models
# import odor_tracking_sim.utility as utility
import odor_tracking_sim.simulation_running_tools as srt
from pompy import models,processors
sys.path.append("..")
from collectors import TrackBoutCollector,FlyCategorizer
from multiprocessing import Pool
detection_threshold = 0.05
# detection_threshold = 0.1
no_repeat_tracking = True
#Comment these out depending on which parameter we're iterating through
# detection_threshold = 0.05
cast_timeout = 20.
# cast_timeout = 40.
cast_interval = [1,3]
cast_delay = 3.
#Wind angle
wind_angle = 0.
wind_mag = 1.6
#arena size
arena_size = 1000.
#file info
file_name='categorizer_method_1m_sustained_release'
output_file = file_name+'.pkl'
#Timing
dt = 0.25
plume_dt = 0.25
frame_rate = 20
times_real_time = 15 # seconds of simulation / sec in video
capture_interval = int(np.ceil(times_real_time*(1./frame_rate)/dt))
# simulation_time = 20.*60. #seconds
simulation_time = 15.*60. #seconds
release_delay = 25.*60#/(wind_mag)
# release_delay = 5.
t_start = 0.0
t = 0. - release_delay
fig = plt.figure(figsize=(11, 11))
ax = fig.add_subplot(111)
wind_param = {
'speed': wind_mag,
'angle': wind_angle,
'evolving': False,
'wind_dt': None,
'dt': dt
}
wind_field_noiseless = wind_models.WindField(param=wind_param)
#Setup ONE plume
#traps
trap_radius = 0.5
location_list = [(0,-100) ]
strength_list = [1]
trap_param = {
'source_locations' : location_list,
'source_strengths' : strength_list,
'epsilon' : 0.01,
'trap_radius' : trap_radius,
'source_radius' : 100
}
traps = trap_models.TrapModel(trap_param)
#Wind and plume objects
#Odor arena
xlim = (-arena_size, arena_size)
ylim = (-arena_size, arena_size)
sim_region = models.Rectangle(xlim[0], ylim[0], xlim[1], ylim[1])
wind_region = models.Rectangle(xlim[0]*1.2,ylim[0]*1.2,
xlim[1]*1.2,ylim[1]*1.2)
source_pos = np.array([np.array(tup) for tup in traps.param['source_locations']]).T
#wind model setup
diff_eq = False
constant_wind_angle = wind_angle
aspect_ratio= (xlim[1]-xlim[0])/(ylim[1]-ylim[0])
noise_gain=3.
noise_damp=0.071
noise_bandwidth=0.71
wind_grid_density = 200
Kx = Ky = 10000 #highest value observed to not cause explosion: 10000
wind_field = models.WindModel(wind_region,int(wind_grid_density*aspect_ratio),
wind_grid_density,noise_gain=noise_gain,noise_damp=noise_damp,
noise_bandwidth=noise_bandwidth,Kx=Kx,Ky=Ky,
diff_eq=diff_eq,angle=constant_wind_angle,mag=wind_mag)
# Set up plume model
centre_rel_diff_scale = 2.
puff_release_rate = 10
puff_spread_rate=0.005
puff_init_rad = 0.01
max_num_puffs=int(2e5)
# max_num_puffs=100
plume_model = models.PlumeModel(
sim_region, source_pos, wind_field,simulation_time+release_delay,
plume_dt,plume_cutoff_radius=1500,
centre_rel_diff_scale=centre_rel_diff_scale,
puff_release_rate=puff_release_rate,
puff_init_rad=puff_init_rad,puff_spread_rate=puff_spread_rate,
max_num_puffs=max_num_puffs,max_distance_from_trap = 5000)
# Create a concentration array generator
array_z = 0.01
array_dim_x = 1000
array_dim_y = array_dim_x
puff_mol_amount = 1.
array_gen = processors.ConcentrationArrayGenerator(
sim_region, array_z, array_dim_x, array_dim_y, puff_mol_amount)
#Start a bunch of flies with uniformly random headings at (0,0)
wind_slippage = (0.,0.)
# swarm_size=20000
# swarm_size=200000
swarm_size=1000000
# swarm_size=2000
release_times = scipy.random.uniform(0,simulation_time/2,size=swarm_size)
# release_times = np.zeros(shape=swarm_size)
swarm_param = {
'swarm_size' : swarm_size,
'heading_data' : None,
'initial_heading' : np.radians(np.full((swarm_size,),270.)), #for orthogonal departure
'x_start_position' : np.arange(0.,1000.,1000./swarm_size),
'y_start_position' : np.zeros(swarm_size),
'flight_speed' : np.full((swarm_size,), 1.5),
'release_time' : release_times,
'release_delay' : release_delay,
'cast_interval' : cast_interval,
'wind_slippage' : wind_slippage,
'odor_thresholds' : {
'lower': 0.0005,
'upper': detection_threshold
},
'schmitt_trigger':False,
'low_pass_filter_length':cast_delay, #seconds
'dt_plot': capture_interval*dt,
't_stop':simulation_time,
'cast_timeout': cast_timeout,
'surging_error_std' : scipy.radians(1e-10),
'airspeed_saturation':False
}
swarm = swarm_models.BasicSwarmOfFlies(wind_field_noiseless,traps,param=swarm_param,
start_type='fh',track_plume_bouts=False,track_arena_exits=False)
#Release density variables
num_bins = 100
max_trap_distance = 1000
bin_width = max_trap_distance/num_bins
fly_release_line_len = int(np.sqrt(
(np.max(swarm.param['x_start_position'])-np.min(swarm.param['x_start_position']))**2+
(np.max(swarm.param['y_start_position'])-np.min(swarm.param['y_start_position']))**2
))
fly_release_density = swarm_size/fly_release_line_len
fly_release_density_per_bin = fly_release_density*bin_width
bins=np.linspace(0,max_trap_distance,num_bins)
#Set up collector object
collector = TrackBoutCollector(swarm_size,wind_angle,source_pos,repeats=False)
#Set up categorizer object
categorizer = FlyCategorizer(swarm_size)
#Initial concentration plotting
conc_array = array_gen.generate_single_array(plume_model.puffs)
xmin = sim_region.x_min; xmax = sim_region.x_max
ymin = sim_region.y_min; ymax = sim_region.y_max
im_extents = (xmin,xmax,ymin,ymax)
vmin,vmax = 0.,50.
cmap = matplotlib.colors.ListedColormap(['white', 'orange'])
conc_im = ax.imshow(conc_array.T[::-1], extent=im_extents,
vmin=vmin, vmax=vmax, cmap=cmap)
xmin,xmax,ymin,ymax = -arena_size,arena_size,-arena_size,arena_size
buffr = 50
ax.set_xlim((xmin-buffr,xmax+buffr))
ax.set_ylim((ymin-buffr,ymax+buffr))
# ax.set_xlim((-200,200))
# ax.set_ylim((-200,200))
#Conc array gen to be used for the flies
sim_region_tuple = plume_model.sim_region.as_tuple()
box_min,box_max = sim_region_tuple[1],sim_region_tuple[2]
#for the plume distance cutoff version, make sure this is at least 2x radius
box_min,box_max = -arena_size,arena_size
r_sq_max=20;epsilon=0.00001;N=1e6
array_gen_flies = processors.ConcentrationValueFastCalculator(
box_min,box_max,r_sq_max,epsilon,puff_mol_amount,N)
#Initial fly plotting
#Sub-dictionary for color codes for the fly modes
Mode_StartMode = 0
Mode_FlyUpWind = 1
Mode_CastForOdor = 2
Mode_Trapped = 3
edgecolor_dict = {Mode_StartMode : 'blue',
Mode_FlyUpWind : 'red',
Mode_CastForOdor : 'red',
Mode_Trapped : 'black'}
facecolor_dict = {Mode_StartMode : 'blue',
Mode_FlyUpWind : 'red',
Mode_CastForOdor : 'white',
Mode_Trapped : 'black'}
fly_edgecolors = [edgecolor_dict[mode] for mode in swarm.mode]
fly_facecolors = [facecolor_dict[mode] for mode in swarm.mode]
fly_dots = plt.scatter(swarm.x_position, swarm.y_position,
edgecolor=fly_edgecolors,facecolor = fly_facecolors,alpha=0.9)
#Plot traps
for x,y in traps.param['source_locations']:
plt.scatter(x,y,marker='x',s=50,c='k')
p = matplotlib.patches.Circle((x, y), trap_radius,color='red',fill=False)
ax.add_patch(p)
# plt.ion()
# plt.show()
#Start time loop
#Pre-initialize the pre-update mode snapshots so the collector bookkeeping in
#the loop body is well-defined during the release delay (t < 0), before the
#'if t>0.' branch below has populated them.
dispersing_last_step = (swarm.mode == Mode_StartMode)
casting_last_step = (swarm.mode == Mode_CastForOdor)
not_trapped_last_step = (swarm.mode != Mode_Trapped)
ever_tracked_last_step = swarm.ever_tracked
while t<simulation_time:
for k in range(capture_interval):
#update flies
print('t: {0:1.2f}'.format(t))
#update the swarm
for j in range(int(dt/plume_dt)):
wind_field.update(plume_dt)
plume_model.update(plume_dt,verbose=True)
if t>0.:
dispersing_last_step = (swarm.mode == Mode_StartMode)
casting_last_step = (swarm.mode == Mode_CastForOdor)
not_trapped_last_step = (swarm.mode != Mode_Trapped)
ever_tracked_last_step = swarm.ever_tracked
swarm.update(t,dt,wind_field_noiseless,array_gen_flies,traps,plumes=plume_model,
pre_stored=False)
#Update the collector object
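        # A transition is detected by differencing the boolean masks taken
        # before and after swarm.update: e.g. a fly that was dispersing on the
        # previous step and is flying upwind now has just detected the plume.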
newly_surging = dispersing_last_step & (swarm.mode == Mode_FlyUpWind)
newly_dispersing = casting_last_step & (swarm.mode == Mode_StartMode)
newly_trapped = not_trapped_last_step & (swarm.mode == Mode_Trapped)
dispersal_mode_flies = (swarm.mode == Mode_StartMode)
collector.update_for_trapped(newly_trapped)
collector.update_for_loss(
newly_dispersing,swarm.x_position[newly_dispersing],swarm.y_position[newly_dispersing])
if no_repeat_tracking:
newly_surging = newly_surging & (~ever_tracked_last_step)
collector.update_for_detection(
newly_surging,swarm.x_position[newly_surging],swarm.y_position[newly_surging])
collector.update_for_missed_detection(swarm.x_position,swarm.y_position,
dispersal_mode_flies,ever_tracked_last_step)
categorizer.update(swarm,collector,newly_trapped,newly_dispersing)
t+= dt
# if t>0:
# # Plotting
# fly_dots.set_offsets(np.c_[swarm.x_position,swarm.y_position])
# fly_edgecolors = [edgecolor_dict[mode] for mode in swarm.mode]
# fly_facecolors = [facecolor_dict[mode] for mode in swarm.mode]
# fly_dots.set_edgecolor(fly_edgecolors)
# fly_dots.set_facecolor(fly_facecolors)
#
#
# if t<2.:
# conc_array = array_gen.generate_single_array(plume_model.puffs)
#
# log_im = np.log(conc_array.T[::-1])
# cutoff_l = np.percentile(log_im[~np.isinf(log_im)],1)
# cutoff_u = np.percentile(log_im[~np.isinf(log_im)],99)
#
# conc_im.set_data(log_im)
# n = matplotlib.colors.Normalize(vmin=cutoff_l,vmax=cutoff_u)
# conc_im.set_norm(n)
# writer.grab_frame()
# plt.pause(0.001)
#Save the collector object with pickle
with open(output_file, 'wb') as f:
swarm_param.update({'simulation_duration':t})
pickle.dump((swarm,swarm_param,collector,categorizer),f)
| [
"[email protected]"
] | |
2892a29779329b536be0813eeed92579aafb9c44 | 4b3dc3173bf9ba136f7c2d3a4e7e52fba44c877a | /test/test_notification.py | 4d16328faa27618c34d268a3f43711969399e9b1 | [] | no_license | mr-arty/bitmex-client | cf00a4a6f97f7425a01ec587473dd7b1cfc09b90 | 2bbe7bebd9e43e45b9705c327a6dd12fa5f7c357 | refs/heads/master | 2020-04-28T02:10:25.946138 | 2017-07-30T20:43:08 | 2017-07-30T20:43:08 | 174,888,633 | 1 | 0 | null | 2019-03-10T22:26:35 | 2019-03-10T22:26:35 | null | UTF-8 | Python | false | false | 2,339 | py | # coding: utf-8
"""
BitMEX API
## REST API for the BitMEX Trading Platform [Changelog](/app/apiChangelog) ---- #### Getting Started ##### Fetching Data All REST endpoints are documented below. You can try out any query right from this interface. Most table queries accept `count`, `start`, and `reverse` params. Set `reverse=true` to get rows newest-first. Additional documentation regarding filters, timestamps, and authentication is available in [the main API documentation](https://www.bitmex.com/app/restAPI). *All* table data is available via the [Websocket](/app/wsAPI). We highly recommend using the socket if you want to have the quickest possible data without being subject to ratelimits. ##### Return Types By default, all data is returned as JSON. Send `?_format=csv` to get CSV data or `?_format=xml` to get XML data. ##### Trade Data Queries *This is only a small subset of what is available, to get you started.* Fill in the parameters and click the `Try it out!` button to try any of these queries. * [Pricing Data](#!/Quote/Quote_get) * [Trade Data](#!/Trade/Trade_get) * [OrderBook Data](#!/OrderBook/OrderBook_getL2) * [Settlement Data](#!/Settlement/Settlement_get) * [Exchange Statistics](#!/Stats/Stats_history) Every function of the BitMEX.com platform is exposed here and documented. Many more functions are available. ---- ## All API Endpoints Click to expand a section.
OpenAPI spec version: 1.2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import bitmex_client
from bitmex_client.rest import ApiException
from bitmex_client.models.notification import Notification
from datetime import datetime
class TestNotification(unittest.TestCase):
""" Notification unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testNotification(self):
"""
Test Notification
"""
model = bitmex_client.models.notification\
.Notification(date=datetime.now(),
title='notification title',
body='notification body',
ttl=12)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
6132aa107f05e411512aa3ca638e2fafc769ffa2 | 732d750ce7b96090bc1b252fbefdadfe167990a1 | /networker/networkplanner_runner.py | 1d445499bfceb68c7ee9d0998c0ebcf7adc87cc9 | [] | no_license | carbz/networker | 4008174200db1865635f524646ad550187a4d289 | cab14026118db42603bd1a5757ec460c6cb4984d | refs/heads/master | 2021-01-15T10:24:59.858048 | 2015-04-22T17:11:49 | 2015-04-22T17:11:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,420 | py | # -*- coding: utf-8 -*-
import json
import jsonschema
import os
import networkx as nx
import numpy as np
from np.lib import dataset_store, metric, variable_store as VS
import networker.geomath as gm
from networker.classes.geograph import GeoGraph
from networker import networker_runner
from networker.utils import csv_projection
class NetworkPlannerRunner(object):
"""
class for running combined metric computation and minimum spanning forest
on spatially referenced nodes
    i.e. This is a wrapper for running the NetworkPlanner process with its
network algorithm replaced by networker's algorithm
Attributes:
config: dict (potentially nested) of configuration params
params are documented in networkplanner_config_schema.json
"""
SCHEMA_FILE = "networkplanner_config_schema.json"
def __init__(self, config, output_directory="."):
self.config = config
self.output_directory = output_directory
def run(self):
"""
run metrics calculations and then minimum spanning forest algorithm
on inputs and write output based on configuration
"""
# make output dir if not exists
if not os.path.exists(self.output_directory):
os.makedirs(self.output_directory)
metric_config = json.load(open(
self.config['metric_model_parameters_file']))
# read in metrics and setup dataset_store
demand_proj = csv_projection(self.config['demand_nodes_file'])
target_path = os.path.join(self.output_directory, "dataset.db")
self.store = dataset_store.create(target_path,
self.config['demand_nodes_file'])
metric_model = metric.getModel(self.config['metric_model'])
metric_vbobs = self._run_metric_model(metric_model, metric_config)
demand_nodes = self._get_demand_nodes(input_proj=demand_proj)
existing, msf = self._build_network(demand_nodes)
self._store_networks(msf, existing)
metric_vbobs = self._update_metrics(metric_model, metric_vbobs)
self._save_output(metric_vbobs, metric_config, metric_model)
def _run_metric_model(self, metric_model, metric_config):
"""
Run the 'metrics' or 'demand' process of networkplanner
"""
metric_value_by_option_by_section = self.store.applyMetric(
metric_model, metric_config)
return metric_value_by_option_by_section
def _get_default_proj4(self, coords):
"""
in case there's no proj, guess
"""
input_proj = gm.PROJ4_FLAT_EARTH
if gm.is_in_lon_lat(coords):
input_proj = gm.PROJ4_LATLONG
return input_proj
def _get_demand_nodes(self, input_proj=None):
"""
Converts the dataset_store metrics records to a GeoGraph of nodes
(prereq: _run_metric_model to populate store)
Args:
input_proj: projection of demand node coordinates
Returns:
GeoGraph: demand nodes as GeoGraph
"""
coords = [node.getCommonCoordinates() for node in
self.store.cycleNodes()]
# set default projection
if not input_proj:
input_proj = self._get_default_proj4(coords)
# NOTE: Although dataset_store nodes id sequence starts at 1
# leave the GeoGraph ids 0 based because there are places in the
# network algorithm that assume 0 based coords
# This will be realigned later
coords_dict = {i: coord for i, coord in enumerate(coords)}
budget_dict = {i: node.metric for i, node in
enumerate(self.store.cycleNodes())}
geo_nodes = GeoGraph(input_proj, coords_dict)
nx.set_node_attributes(geo_nodes, 'budget', budget_dict)
return geo_nodes
def _build_network(self, demand_nodes):
"""
        project demand nodes onto the optional existing supply network and
        run the network generation algorithm on it
Args:
demand_nodes: GeoGraph of demand nodes
Returns:
GeoGraph minimum spanning forest proposed by the chosen
network algorithm
"""
geo_graph = subgraphs = rtree = None
existing = None
if 'existing_networks' in self.config:
existing = networker_runner.load_existing_networks(
**self.config['existing_networks'])
# rename existing nodes so that they don't intersect with metrics
nx.relabel_nodes(existing,
{n: 'grid-' + str(n) for n in existing.nodes()}, copy=False)
existing.coords = {'grid-' + str(n): c for n, c in
existing.coords.items()}
geo_graph, subgraphs, rtree = \
networker_runner.merge_network_and_nodes(existing, \
demand_nodes)
else:
geo_graph = demand_nodes
# now run the selected algorithm
network_algo = networker_runner.NetworkerRunner.ALGOS[\
self.config['network_algorithm']]
result_geo_graph = network_algo(geo_graph, subgraphs=subgraphs,\
rtree=rtree)
# now filter out subnetworks via minimum node count
min_node_count = self.config['network_parameters']\
['minimum_node_count']
filtered_graph = nx.union_all(filter(
lambda sub: len(sub.node) >= min_node_count,
nx.connected_component_subgraphs(result_geo_graph)))
# map coords back to geograph
# NOTE: explicit relabel to int as somewhere in filtering above, some
# node ids are set to numpy types which screws up comparisons to
# tuples in write op
# TODO: Google problem and report to networkx folks if needed
# NOTE: relabeling nodes in-place here drops node attributes for some
# reason so create a copy for now
# NOTE: use i+1 as node id in graph because dataset_store node ids
# start at 1 (this is the realignment noted in _get_demand_nodes)
coords = {i+1: result_geo_graph.coords[i] for i in filtered_graph}
relabeled = nx.relabel_nodes(filtered_graph, {i: int(i+1)
for i in filtered_graph}, copy=True)
msf = GeoGraph(result_geo_graph.srs, coords=coords, data=relabeled)
return existing, msf
def _store_networks(self, msf, existing=None):
# Add the existing grid to the dataset_store
if existing:
dataset_subnet = dataset_store.Subnet()
for u, v in existing.edges():
segment = dataset_store.Segment(u, v)
segment.subnet_id = dataset_subnet.id
segment.is_existing = True
segment.weight = existing[u][v]['weight']
self.store.session.add(segment)
# Translate the NetworkX Graph to dataset_store objects
for subgraph in nx.connected_component_subgraphs(msf):
# Initialize the subgraph in the store
dataset_subnet = dataset_store.Subnet()
self.store.session.add(dataset_subnet)
self.store.session.commit()
# Extend the dstore subnet with its segments
for u, v, data in subgraph.edges(data=True):
edge = u, v
# If any fake nodes in the edge, add to the dstore
for i, fake in enumerate([n for n in edge if
msf.node[n]['budget'] == np.inf], 1):
dataset_node = self.store.addNode(msf.coords[fake],
is_fake=True)
dataset_node.id = fake
self.store.session.add(dataset_node)
self.store.session.commit()
# Edges should never be composed of two fake nodes
assert i <= 1
# Add the edge to the subnet
segment = dataset_store.Segment(*edge)
segment.subnet_id = dataset_subnet.id
segment.is_existing = False
segment.weight = data['weight']
self.store.session.add(segment)
# Commit changes
self.store.session.commit()
def _update_metrics(self, metric_model, metric_value_by_option_by_section):
"""
calculate and return summary metrics after network has been
determined and stored
"""
return self.store.updateMetric(metric_model,
metric_value_by_option_by_section)
def _save_output(self, metric_value_by_option_by_section, metric_config,
metric_model):
output_directory = self.output_directory
metric.saveMetricsConfigurationCSV(os.path.join(output_directory,
'metrics-job-input'), metric_config)
metric.saveMetricsCSV(os.path.join(output_directory,
'metrics-global'),
metric_model,
metric_value_by_option_by_section)
self.store.saveMetricsCSV(os.path.join(output_directory,
'metrics-local'),
metric_model,
VS.HEADER_TYPE_ALIAS)
# underlying library can't handle unicode strings so cast via str
self.store.saveSegmentsSHP(os.path.join(str(output_directory),
'networks-proposed'), is_existing=False)
def validate(self):
"""
validate configuration
throws jsonschema Validate exception if invalid
"""
# load schema and validate it via jsonschema
schema_path = os.path.join(os.path.dirname(
os.path.abspath(__file__)), NetworkPlannerRunner.SCHEMA_FILE)
schema = json.load(open(schema_path))
jsonschema.validate(self.config, schema)
def dataset_store_to_geograph(dataset_store):
"""
convenience function for converting a network stored in a dataset_store
into a GeoGraph
Args:
dataset_store containing a network
Returns:
GeoGraph representation of dataset_store network
TODO: determine projection from dataset_store?
"""
all_nodes = list(dataset_store.cycleNodes()) + \
list(dataset_store.cycleNodes(isFake=True))
np_to_nx_id = {node.id: i for i, node in enumerate(all_nodes)}
coords = [node.getCommonCoordinates() for node in all_nodes]
coords_dict = dict(enumerate(coords))
budget_dict = {i: node.metric for i, node in enumerate(all_nodes)}
G = GeoGraph(coords=coords_dict)
nx.set_node_attributes(G, 'budget', budget_dict)
seg_to_nx_ids = lambda seg: (np_to_nx_id[seg.node1_id],
np_to_nx_id[seg.node2_id])
edges = [seg_to_nx_ids(s) for s in
dataset_store.cycleSegments(is_existing=False)]
edge_weights = {seg_to_nx_ids(s): s.weight for s in
dataset_store.cycleSegments(is_existing=False)}
edge_is_existing = {seg_to_nx_ids(s): s.is_existing for s in
dataset_store.cycleSegments(is_existing=False)}
edge_subnet_id = {seg_to_nx_ids(s): s.subnet_id for s in
dataset_store.cycleSegments(is_existing=False)}
G.add_edges_from(edges)
nx.set_edge_attributes(G, 'weight', edge_weights)
nx.set_edge_attributes(G, 'is_existing', edge_is_existing)
nx.set_edge_attributes(G, 'subnet_id', edge_subnet_id)
return G
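# Illustrative driver sketch (hypothetical file names; the config keys must
# match networkplanner_config_schema.json):
#   import json
#   cfg = json.load(open('networkplanner_config.json'))
#   runner = NetworkPlannerRunner(cfg, output_directory='output')
#   runner.validate()
#   runner.run()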
| [
"[email protected]"
] | |
b048c82de8d6c4b233cee4aa77e581fe72753f18 | f889bc01147869459c0a516382e7b95221295a7b | /test/test_quote_data_shipping_assignment_extension_interface.py | b82703cfb180e7b13b38325c6450a7fa4026bb1f | [] | no_license | wildatheart/magento2-api-client | 249a86f5c0289743f8df5b0324ccabd76f326512 | e6a707f85b37c6c3e4ef3ff78507a7deb8f71427 | refs/heads/master | 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.quote_data_shipping_assignment_extension_interface import QuoteDataShippingAssignmentExtensionInterface
class TestQuoteDataShippingAssignmentExtensionInterface(unittest.TestCase):
""" QuoteDataShippingAssignmentExtensionInterface unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testQuoteDataShippingAssignmentExtensionInterface(self):
"""
Test QuoteDataShippingAssignmentExtensionInterface
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.quote_data_shipping_assignment_extension_interface.QuoteDataShippingAssignmentExtensionInterface()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
2f571add472e396515f178e0a28ad895ec6c58e3 | 24d33b98fb16cace92989a414a9121e505f47d68 | /gromacs/helper/create_request.py | 4813a9e35308d26978a07a3d83ee1c748483fa27 | [
"Apache-2.0"
] | permissive | michellab/BioSimSpaceCloud | eae9b2aff3184f097ffa667e987d7dd9a99c40e0 | 456b146a2131565e354352872d3e75a08c3652d1 | refs/heads/master | 2022-11-22T22:58:45.794442 | 2018-11-06T12:30:40 | 2018-11-06T12:30:40 | 137,510,733 | 2 | 1 | Apache-2.0 | 2022-11-16T01:28:44 | 2018-06-15T16:38:06 | Python | UTF-8 | Python | false | false | 648 | py | """
This script helps debugging by creating the login and request
from the .oci/config and hard-coded compartment and bucket names
"""
import oci
import json
import os
import sys
config = oci.config.from_file()
compartment = "ocid1.compartment.oc1..aaaaaaaat33j7w74mdyjenwoinyeawztxe7ri6qkfbm5oihqb5zteamvbpzq"
bucket = "test-gromacs-bucket"
key_lines = open(os.path.expanduser(config["key_file"]), "r").readlines()
del config["key_file"]
config["key_lines"] = key_lines
data = {}
data["login"] = config
data["compartment"] = compartment
data["bucket"] = bucket
try:
data["task"] = sys.argv[1]
except IndexError:  # no task argument given on the command line
pass
print(json.dumps(data))
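# Usage sketch: `python create_request.py [task] > request.json` prints a JSON
# payload bundling the OCI login config (with the private key inlined as
# key_lines), the compartment and bucket ids, and an optional task name.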
| [
"[email protected]"
] | |
65d06f0ba3e1e4830c7736caf8f8c72d0924672f | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/leetcode/LeetcodePythonProject/leetcode_0651_0700/LeetCode661_ImageSmoother.py | ea380490539357f477d176685b5386c8a0c01c7b | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 1,641 | py | '''
Created on Oct 8, 2017
@author: MT
'''
class Solution(object):
def imageSmoother(self, M):
"""
:type M: List[List[int]]
:rtype: List[List[int]]
"""
import math
matrix = M
if not matrix or not matrix[0]:
return []
m, n = len(matrix), len(matrix[0])
res = [[0]*n for _ in range(m)]
for i in range(m):
for j in range(n):
count = float(matrix[i][j])
num = 1.0
                for x, y in ((i+1, j), (i-1, j), (i, j+1), (i, j-1),
                             (i+1, j+1), (i-1, j-1), (i+1, j-1), (i-1, j+1)):
                    if 0 <= x < m and 0 <= y < n:
                        # every in-bounds neighbor counts toward the average,
                        # including zero-valued cells
                        count += float(matrix[x][y])
                        num += 1
tmp = int(math.floor(count/num))
res[i][j] = tmp
return res
def test(self):
testCases = [
[
[2,3],
],
[
[1,1,1],
[1,0,1],
[1,1,1],
],
[
[2, 3, 4],
[5, 6, 7],
[8, 9, 10],
[11,12,13],
[14,15,16],
]
]
for matrix in testCases:
print('matrix:')
print('\n'.join([str(row) for row in matrix]))
result = self.imageSmoother(matrix)
print('result:')
print('\n'.join([str(row) for row in result]))
print('-='*30+'-')
if __name__ == '__main__':
Solution().test()
| [
"[email protected]"
] | |
c1c62be621c780e8e93f7cadb39985bb6590d277 | 15b53554ef4f9418e2aaffe663789c3a86dfc269 | /ratings/admin.py | 99a24a50c982cc72615f41ccd703d2258670666f | [
"MIT"
] | permissive | felkiriinya/Awards | 698c94c96f18c2dd19056adf9b94a2aaf6fc0f0e | 2f9aa725eafde02c648281c97635ac7242b05d2f | refs/heads/master | 2023-01-23T20:59:26.081077 | 2020-11-30T14:00:02 | 2020-11-30T14:00:02 | 316,494,399 | 0 | 0 | MIT | 2020-11-27T17:54:14 | 2020-11-27T12:22:54 | Python | UTF-8 | Python | false | false | 156 | py | from django.contrib import admin
from .models import Project,Profile
# Register your models here.
admin.site.register(Profile)
admin.site.register(Project) | [
"[email protected]"
] | |
dedf793fa350bfeb39850c889f4b74b3ea4c6326 | 09e4b7c55279dca3914f69e27eadceb4ceeac885 | /algorithms/dynamic_programming_manim_animation.py | add71f6b7c77dd860c5f34dec7ec4baeec9d4d7c | [
"Apache-2.0"
] | permissive | Mageswaran1989/ai4e | 2ea14b2019990b31e36ccb9fbe8955ba0509da4d | 16fe8fc8c7886d36aac3df3c9c6e28414a5286c8 | refs/heads/main | 2023-07-27T18:20:35.393336 | 2021-09-03T02:22:41 | 2021-09-03T02:22:41 | 316,110,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98,543 | py | from manimlib.imports import *
# NOTE: this file targets the old 3b1b manimlib API (TextMobject, ShowCreation,
# FRAME_X_RADIUS); star-importing the community 'manim' package on top of it
# would shadow those names, so only manimlib is imported above.
import random
class GraphNode:
def __init__(self, data, position=ORIGIN, radius=0.5, neighbors=[], scale=1):
self.char = data
self.data = TextMobject(str(data))
self.data.scale(scale)
self.neighbors = []
self.center = position
self.radius = radius
self.circle = Circle(radius=radius)
self.circle.move_to(position)
self.data.move_to(position)
self.drawn = False
self.marked = False
self.edges = []
self.prev = None
def connect(self, other):
line_center = Line(self.center, other.center)
unit_vector = line_center.get_unit_vector()
start, end = line_center.get_start_and_end()
new_start = start + unit_vector * self.radius
new_end = end - unit_vector * self.radius
line = Line(new_start, new_end)
self.neighbors.append(other)
other.neighbors.append(self)
self.edges.append(line)
other.edges.append(line)
return line
def connect_arrow(self, other):
line_center = Line(self.center, other.center)
unit_vector = line_center.get_unit_vector()
start, end = line_center.get_start_and_end()
new_start = start + unit_vector * self.radius / 2
new_end = end - unit_vector * self.radius / 2
arrow = Arrow(new_start, new_end)
arrow.buff = self.radius / 2
arrow.unit_vector = unit_vector
self.neighbors.append(other)
self.edges.append(arrow)
return arrow
def connect_curve(self, counter_clock_adj_self, other, clockwise_adj_other, angle=TAU / 4):
line_self = Line(counter_clock_adj_self.circle.get_center(), self.circle.get_center())
unit_vector_self = line_self.get_unit_vector()
line_other = Line(clockwise_adj_other.circle.get_center(), other.circle.get_center())
unit_vector_other = line_other.get_unit_vector()
curve_start = self.circle.get_center() + unit_vector_self * self.radius
curve_end = other.circle.get_center() + unit_vector_other * self.radius
line = ArcBetweenPoints(curve_start, curve_end, angle=angle)
self.neighbors.append(other)
other.neighbors.append(self)
self.edges.append(line)
other.edges.append(line)
def connect_curved_arrow(self, other, direction=UP, angle=TAU / 4):
curve_start = self.circle.get_center() + direction * self.radius
curve_end = other.circle.get_center() + direction * self.radius
line = CurvedArrow(curve_start, curve_end, angle=angle, tip_length=0.2)
self.neighbors.append(other)
self.edges.append(line)
return line
def __repr__(self):
return 'GraphNode({0})'.format(self.char)
def __str__(self):
return 'GraphNode({0})'.format(self.char)
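# A minimal usage sketch for GraphNode (illustrative only; the scenes below
# build their graphs through their own helper methods, not this function):
def _graph_node_demo():
    a = GraphNode("A", position=LEFT, radius=0.4)
    b = GraphNode("B", position=RIGHT, radius=0.4)
    edge = a.connect(b)         # undirected Line trimmed to the circle edges
    arrow = a.connect_arrow(b)  # directed Arrow from a to b
    return VGroup(a.circle, a.data, b.circle, b.data, edge, arrow)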
class IntroDP(Scene):
def construct(self):
title = TextMobject("Dynamic Programming")
title.scale(1.2)
title.shift(UP * 3.5)
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS - 1)
h_line.next_to(title, DOWN)
self.play(
Write(title),
ShowCreation(h_line)
)
self.wait(8)
definition = BulletedList(
"Identifying and solving subproblems",
"Using subproblems together to solve larger problem"
)
definition.next_to(h_line, DOWN)
self.play(
Write(definition[0])
)
self.play(
Write(definition[1])
)
self.wait(11)
self.play(
FadeOut(definition),
)
self.wait()
steps = TextMobject(r"5 STEPS ", r"$\rightarrow$ 2 PROBLEMS")
steps.scale(1.1)
steps.next_to(h_line, DOWN)
self.play(
Write(steps[0])
)
self.wait(6)
self.play(
Write(steps[1])
)
# self.play(
# Write(steps[1])
# )
rect_1 = ScreenRectangle(height=3.2)
rect_1.move_to(LEFT * 3)
rect_2 = ScreenRectangle(height=3.2)
rect_2.move_to(RIGHT * 3)
self.play(
ShowCreation(rect_1),
ShowCreation(rect_2)
)
fundamental = TextMobject("Fundamental")
fundamental.next_to(rect_1, DOWN)
self.play(
FadeIn(fundamental)
)
self.wait(2)
challenging = TextMobject("Challenging")
challenging.next_to(rect_2, DOWN)
self.play(
FadeIn(challenging)
)
self.wait(4)
rect_3 = ScreenRectangle(height=5)
rect_3.move_to(DOWN * 0.5)
self.play(
ReplacementTransform(rect_1, rect_3),
ReplacementTransform(rect_2, rect_3),
FadeOut(fundamental),
FadeOut(challenging),
run_time=2
)
finding_subproblems = TextMobject("Guide to Finding Subproblems")
finding_subproblems.next_to(rect_3, DOWN)
self.play(
FadeIn(finding_subproblems)
)
self.wait(4)
class MakeGrids(Scene):
def construct(self):
all_grids = self.make_matrices()
self.play(
FadeOut(all_grids)
)
def make_matrices(self):
grids = []
colors = [RED, ORANGE, YELLOW, GREEN_SCREEN, BLUE, VIOLET]
for i in range(6):
grid = self.create_grid(i, i, 0.5)
grid_group = self.get_group_grid(grid)
grid_group.set_fill(color=colors[i], opacity=0.3)
grids.append(grid_group)
all_grids = VGroup(*grids)
all_grids.move_to(DOWN)
original_positions = [all_grids[:i].get_center() for i in range(2, 7)]
self.play(
ShowCreation(all_grids),
run_time=3
)
self.wait(3)
self.play(
all_grids.shift, RIGHT * 5
)
self.play(
all_grids[:5].shift, LEFT * 3,
rate_func=smooth
)
self.play(
all_grids[:4].shift, LEFT * 2.5,
rate_func=smooth
)
self.play(
all_grids[:3].shift, LEFT * 2,
rate_func=smooth
)
self.play(
all_grids[:2].shift, LEFT * 1.5,
rate_func=smooth
)
self.wait()
self.play(
all_grids[5].shift, LEFT * 3,
rate_func=smooth
)
self.play(
all_grids[4:].shift, LEFT * 2.5,
rate_func=smooth
)
self.play(
all_grids[3:].shift, LEFT * 2,
rate_func=smooth
)
self.play(
all_grids[2:].shift, LEFT * 1.5,
rate_func=smooth
)
self.play(
all_grids.move_to, DOWN,
rate_func=smooth
)
self.wait(3)
return all_grids
def create_grid(self, rows, columns, square_length):
left_corner = Square(side_length=square_length)
grid = []
first_row = [left_corner]
for i in range(columns - 1):
square = Square(side_length=square_length)
square.next_to(first_row[i], RIGHT, buff=0)
first_row.append(square)
grid.append(first_row)
for i in range(rows - 1):
prev_row = grid[i]
# print(prev_row)
new_row = []
for square in prev_row:
# print(square)
square_below = Square(side_length=square_length)
square_below.next_to(square, DOWN, buff=0)
new_row.append(square_below)
grid.append(new_row)
return grid
def get_group_grid(self, grid):
squares = []
for row in grid:
for square in row:
squares.append(square)
return VGroup(*squares)
class BoxProblem(Scene):
def construct(self):
title = TextMobject("Box Stacking")
title.scale(1.2)
title.shift(UP * 3.5)
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS - 1)
h_line.next_to(title, DOWN)
self.play(
Write(title),
ShowCreation(h_line)
)
problem = self.introduce_problem(h_line)
self.show_examples(problem, h_line, title)
# dimensions = [(5, 3, 2), (3, 2, 1)]
# dimensions = [self.convert_to_display_dimesions(d) for d in dimensions]
# boxes = self.make_stack_of_boxes(dimensions, [RED, BLUE])
# boxes = VGroup(*boxes)
# boxes.scale(0.5)
# boxes.shift(DOWN)
# self.play(
# FadeIn(boxes)
# )
# self.wait()
def convert_to_display_dimesions(self, dimension):
return (dimension[1], dimension[2], dimension[0])
def convert_to_input_dimensions(self, dimension):
return (dimension[2], dimension[0], dimension[1])
def introduce_problem(self, h_line):
problem = TextMobject(
r"Given $n$ boxes $[ (L_1, W_1, H_1), (L_2, W_2, H_2), \ldots , (L_n, W_n, H_n) ]$ where box $i$ has" + "\\\\",
r"length $L_i$, width $W_i$, and height $H_i$, find the height of the tallest possible stack." + "\\\\",
r"Box $(L_i, W_i, H_i)$ can be on top of box $(L_j, W_j, H_j) \text{ if } L_i < L_j, W_i < W_j$."
)
problem.scale(0.6)
problem.next_to(h_line, DOWN)
self.wait(2)
self.play(
Write(problem[0])
)
self.play(
Write(problem[1])
)
self.wait(3)
self.play(
Write(problem[2])
)
self.wait(13)
return problem
def show_examples(self, problem, h_line, title):
box_dims = [
(2, 3, 3),
(2, 2, 4),
(4, 4, 2),
]
display_dims = [self.convert_to_display_dimesions(d) for d in box_dims]
first_example = TextMobject(r"$[(2, 3, 3), (2, 2, 4), (4, 4, 2)]$")
first_example.scale(0.6)
first_example.next_to(problem, DOWN)
self.play(
Write(first_example)
)
scale_factor = 0.3
box_shapes = []
box_colors = [RED, GREEN_SCREEN, BLUE]
for i, display_dim in enumerate(display_dims):
l, w, h = display_dim
box = self.construct_box(l=l, w=w, h=h, color=box_colors[i])
box.scale(scale_factor)
box_shapes.append(box)
box_shapes[0].next_to(first_example, DOWN)
box_shapes[0].shift(LEFT * 3.5)
box_shapes[1].next_to(box_shapes[0], DOWN)
box_shapes[2].next_to(box_shapes[1], DOWN)
self.play(
*[FadeIn(box) for box in box_shapes]
)
self.wait(2)
stack = self.make_stack_of_boxes(
[display_dims[2], display_dims[1]],
[box_colors[2], box_colors[1]]
)
stack = VGroup(*stack)
stack.scale(scale_factor)
stack.move_to(DOWN)
self.play(
TransformFromCopy(box_shapes[2], stack[0]),
TransformFromCopy(box_shapes[1], stack[1]),
)
self.wait()
best_height = TextMobject(r"height $= 6$")
best_height.scale(0.6)
best_height.next_to(stack, RIGHT * 2)
self.play(
FadeIn(best_height)
)
self.wait(11)
self.play(
FadeOut(best_height),
FadeOut(stack),
FadeOut(first_example),
*[FadeOut(s) for s in box_shapes],
)
box_dims = [
(4, 5, 3),
(2, 3, 2),
(3, 6, 2),
(1, 5, 4),
(2, 4, 1),
(1, 2, 2),
]
display_dims = [self.convert_to_display_dimesions(d) for d in box_dims]
second_example = TextMobject(r"$[(4, 5, 3), (2, 3, 2), (3, 6, 2), (1, 5, 4), (2, 4, 1), (1, 2, 2)]$")
second_example.scale(0.6)
second_example.next_to(problem, DOWN)
self.play(
FadeIn(second_example)
)
scale_factor = 0.3
box_shapes = []
box_colors = [RED, ORANGE, YELLOW, GREEN_SCREEN, BLUE, VIOLET]
for i, display_dim in enumerate(display_dims):
l, w, h = display_dim
box = self.construct_box(l=l, w=w, h=h, color=box_colors[i])
box.scale(scale_factor)
box_shapes.append(box)
box_shapes[0].next_to(first_example, DOWN)
box_shapes[0].shift(LEFT * 4.5 + DOWN * 0.5)
box_shapes[1].next_to(box_shapes[0], DOWN)
box_shapes[2].next_to(box_shapes[1], DOWN)
box_shapes[3].next_to(box_shapes[0], RIGHT)
box_shapes[4].next_to(box_shapes[3], DOWN)
box_shapes[5].next_to(box_shapes[4], DOWN)
self.play(
*[FadeIn(box) for box in box_shapes]
)
self.wait(13)
stack = self.make_stack_of_boxes(
[display_dims[0], display_dims[1], display_dims[5]],
[box_colors[0], box_colors[1], box_colors[5]]
)
stack = VGroup(*stack)
stack.scale(scale_factor)
stack.move_to(DOWN + RIGHT * 1.5)
self.play(
TransformFromCopy(box_shapes[0], stack[0]),
TransformFromCopy(box_shapes[1], stack[1]),
TransformFromCopy(box_shapes[5], stack[2]),
run_time=2
)
self.wait(3)
best_height = TextMobject(r"height $= 7$")
best_height.scale(0.6)
best_height.next_to(stack, RIGHT * 2)
self.play(
FadeIn(best_height)
)
self.wait(12)
step_1 = TextMobject("1. Visualize Examples")
step_1.scale(0.8)
step_1.move_to(DOWN * 3.5)
self.play(
Write(step_1),
)
self.wait(13)
box_shapes_group = VGroup(*box_shapes)
self.play(
FadeOut(stack),
FadeOut(best_height),
FadeOut(problem[0]),
FadeOut(problem[1]),
FadeOut(second_example),
problem[2].move_to, UP * 3,
problem[2].scale, 1.2,
box_shapes_group.move_to, DOWN * 0.5,
FadeOut(title),
FadeOut(h_line),
run_time=2
)
self.wait(13)
self.visualize_example(box_shapes, stack, problem, step_1)
def visualize_example(self, box_shapes, stack, problem, step_1):
new_locations = [
RIGHT * 4 + DOWN * 2.5,
DOWN * 0.5,
RIGHT * 4 + DOWN * 0.5,
UP * 1.5,
DOWN * 2.5,
LEFT * 4 + DOWN * 1.5
]
box_shapes_copy = [box_shapes[i].copy().move_to(new_locations[i]) for i in range(len(box_shapes))]
transforms = [Transform(box_shapes[i], box_shapes_copy[i]) for i in range(len(box_shapes))]
self.play(
*transforms,
run_time=2
)
edges = {}
edges[(1, 2)] = self.connect_arrow_between(box_shapes[1], box_shapes[2])
edges[(1, 0)] = self.connect_arrow_between(box_shapes[1], box_shapes[0])
edges[(3, 2)] = self.connect_arrow_between(box_shapes[3], box_shapes[2])
edges[(4, 2)] = self.connect_arrow_between(box_shapes[4], box_shapes[2])
edges[(4, 0)] = self.connect_arrow_between(box_shapes[4], box_shapes[0])
edges[(5, 1)] = self.connect_arrow_between(box_shapes[5], box_shapes[1])
edges[(5, 4)] = self.connect_arrow_between(box_shapes[5], box_shapes[4])
edges[(5, 2)] = self.connect_arrow_between(box_shapes[5], box_shapes[2])
edges[(5, 0)] = self.connect_arrow_between(box_shapes[5], box_shapes[0])
self.wait(7)
self.play(
box_shapes[5][1].set_color, GREEN_SCREEN
)
self.play(
edges[(5, 1)].set_color, GREEN_SCREEN,
)
self.play(
box_shapes[1][1].set_color, GREEN_SCREEN
)
self.play(
edges[(1, 0)].set_color, GREEN_SCREEN,
)
self.play(
box_shapes[0][1].set_color, GREEN_SCREEN
)
stack.move_to(LEFT * 4 + UP * 1)
self.play(
TransformFromCopy(box_shapes[0], stack[0]),
TransformFromCopy(box_shapes[1], stack[1]),
TransformFromCopy(box_shapes[5], stack[2]),
run_time=2
)
self.wait(8)
step_2 = TextMobject("2. Find an appropriate subproblem")
step_2.scale(0.8)
step_2.move_to(step_1.get_center())
self.play(
FadeOut(stack),
FadeOut(problem[2]),
ReplacementTransform(step_1, step_2),
edges[(5, 1)].set_color, GRAY,
edges[(1, 0)].set_color, GRAY,
box_shapes[0][1].set_color, WHITE,
box_shapes[1][1].set_color, WHITE,
box_shapes[5][1].set_color, WHITE
)
self.wait(12)
self.show_subproblem(box_shapes, edges, step_2, problem)
def show_subproblem(self, box_shapes, edges, step_2, problem):
self.play(
box_shapes[1][1].set_color, GREEN_SCREEN,
box_shapes[5][1].set_color, GREEN_SCREEN,
edges[(5, 1)].set_color, GREEN_SCREEN,
)
self.wait(2)
self.play(
box_shapes[1][1].set_color, WHITE,
box_shapes[4][1].set_color, GREEN_SCREEN,
edges[(5, 1)].set_color, GRAY,
edges[(5, 4)].set_color, GREEN_SCREEN,
)
self.wait(2)
self.play(
box_shapes[1][1].set_color, WHITE,
box_shapes[4][1].set_color, WHITE,
box_shapes[5][1].set_color, WHITE,
box_shapes[3][1].set_color, GREEN_SCREEN,
box_shapes[2][1].set_color, GREEN_SCREEN,
edges[(3, 2)].set_color, GREEN_SCREEN,
edges[(5, 4)].set_color, GRAY,
)
self.wait(2)
self.play(
box_shapes[1][1].set_color, WHITE,
box_shapes[4][1].set_color, WHITE,
box_shapes[3][1].set_color, WHITE,
box_shapes[1][1].set_color, GREEN_SCREEN,
box_shapes[5][1].set_color, GREEN_SCREEN,
edges[(3, 2)].set_color, GRAY,
edges[(5, 1)].set_color, GREEN_SCREEN,
edges[(1, 2)].set_color, GREEN_SCREEN,
)
self.wait(2)
self.play(
box_shapes[0][1].set_color, GREEN_SCREEN,
box_shapes[1][1].set_color, GREEN_SCREEN,
box_shapes[2][1].set_color, WHITE,
box_shapes[3][1].set_color, WHITE,
box_shapes[4][1].set_color, WHITE,
box_shapes[5][1].set_color, GREEN_SCREEN,
edges[(1, 0)].set_color, GREEN_SCREEN,
edges[(5, 1)].set_color, GREEN_SCREEN,
edges[(1, 2)].set_color, GRAY,
)
self.wait(2)
self.play(
box_shapes[0][1].set_color, WHITE,
box_shapes[1][1].set_color, WHITE,
box_shapes[2][1].set_color, WHITE,
box_shapes[3][1].set_color, WHITE,
box_shapes[4][1].set_color, WHITE,
box_shapes[5][1].set_color, WHITE,
edges[(3, 2)].set_color, GRAY,
edges[(1, 0)].set_color, GRAY,
edges[(5, 1)].set_color, GRAY,
edges[(1, 2)].set_color, GRAY,
)
subproblem = TextMobject(
"Subproblem: height[$(L_i, W_i, H_i)$]" + "\\\\",
r"Largest height of stack with box $(L_i, W_i, H_i)$ at the bottom"
)
subproblem.scale(0.7)
subproblem.move_to(UP * 3.3)
self.play(
Write(subproblem[0])
)
self.play(
Write(subproblem[1])
)
self.wait(13)
ex1 = TextMobject(r"height[$(2, 3, 2)$] = 4")
ex1.scale(0.7)
ex1[0][7:14].set_color(ORANGE)
ex1.move_to(LEFT * 4 + UP * 1.8)
ex2 = TextMobject(r"height[$(3, 6, 2)$] = 6")
ex2.scale(0.7)
ex2[0][7:14].set_color(YELLOW)
ex2.next_to(ex1, DOWN)
ex3 = TextMobject(r"height[$(4, 5, 3)$] = 7")
ex3.scale(0.7)
ex3[0][7:14].set_color(RED)
ex3.next_to(ex2, DOWN)
self.play(
Write(ex1[0][:-2])
)
self.play(
box_shapes[5][1].set_color, GREEN_SCREEN,
edges[(5, 1)].set_color, GREEN_SCREEN,
box_shapes[1][1].set_color, GREEN_SCREEN
)
self.play(
Write(ex1[0][-2:])
)
self.wait(3)
self.play(
box_shapes[5][1].set_color, WHITE,
edges[(5, 1)].set_color, GRAY,
box_shapes[1][1].set_color, WHITE
)
self.wait()
self.play(
Write(ex2[0][:-2])
)
self.play(
box_shapes[2][1].set_color, GREEN_SCREEN,
edges[(3, 2)].set_color, GREEN_SCREEN,
box_shapes[3][1].set_color, GREEN_SCREEN
)
self.play(
Write(ex2[0][-2:])
)
self.wait(9)
self.play(
box_shapes[2][1].set_color, WHITE,
edges[(3, 2)].set_color, GRAY,
box_shapes[3][1].set_color, WHITE
)
self.wait()
self.play(
Write(ex3[0][:-2])
)
self.play(
box_shapes[0][1].set_color, GREEN_SCREEN,
edges[(1, 0)].set_color, GREEN_SCREEN,
box_shapes[1][1].set_color, GREEN_SCREEN,
edges[(5, 1)].set_color, GREEN_SCREEN,
box_shapes[5][1].set_color, GREEN_SCREEN
)
self.play(
Write(ex3[0][-2:])
)
self.wait(10)
self.play(
box_shapes[0][1].set_color, WHITE,
edges[(1, 0)].set_color, GRAY,
box_shapes[1][1].set_color, WHITE,
edges[(5, 1)].set_color, GRAY,
box_shapes[5][1].set_color, WHITE
)
self.wait()
box_nodes = VGroup(*box_shapes)
box_edges = VGroup(*[edges[key] for key in edges])
box_graph = VGroup(box_nodes, box_edges)
step_3 = TextMobject("3. Find relationships among subproblems")
step_3.scale(0.8)
step_3.move_to(step_2.get_center())
self.play(
box_graph.scale, 0.8,
box_graph.shift, UP * 1,
FadeOut(ex1),
FadeOut(ex2),
FadeOut(ex3),
ReplacementTransform(step_2, step_3)
)
self.wait(8)
self.find_relationships(box_shapes, edges, box_graph, step_3, subproblem)
def find_relationships(self, box_shapes, edges, box_graph, step_3, subproblem):
subproblems_needed = TextMobject("What subproblems are needed to solve height$[(4, 5, 3)]$?")
subproblems_needed.scale(0.8)
subproblems_needed[0][-9:-2].set_color(RED)
subproblems_needed.next_to(box_graph, DOWN)
self.play(
Write(subproblems_needed)
)
dest_rectangle = SurroundingRectangle(box_shapes[0], buff=SMALL_BUFF)
dest_rectangle.set_color(RED)
self.play(
ShowCreation(dest_rectangle)
)
self.wait()
self.play(
edges[(1, 0)].set_color, GREEN_SCREEN,
edges[(4, 0)].set_color, GREEN_SCREEN,
edges[(5, 0)].set_color, GREEN_SCREEN,
)
self.wait(5)
subproblem_rects = [SurroundingRectangle(box_shapes[i], buff=SMALL_BUFF) for i in [1, 4, 5]]
necessary_subproblems = TextMobject(r"height$\left[ (2, 3, 2) \right] = 4 \quad$",
r"height$\left[ (2, 4, 1) \right] = 3 \quad$",
r"height$\left[ (1, 2, 2) \right] = 2$")
necessary_subproblems.scale(0.8)
necessary_subproblems[0][-9:-3].set_color(ORANGE)
necessary_subproblems[1][-9:-3].set_color(BLUE)
necessary_subproblems[2][-9:-3].set_color(VIOLET)
necessary_subproblems.next_to(subproblems_needed, DOWN)
self.play(
ShowCreation(subproblem_rects[0])
)
self.play(
Write(necessary_subproblems[0])
)
self.wait()
self.play(
ShowCreation(subproblem_rects[1])
)
self.play(
Write(necessary_subproblems[1])
)
self.wait(3)
self.play(
ShowCreation(subproblem_rects[2])
)
self.play(
Write(necessary_subproblems[2])
)
self.wait(10)
using_subproblems = TextMobject(r"How do we use these subproblems to solve height[$(4, 5, 3)$]?")
using_subproblems.scale(0.8)
using_subproblems[0][-9:-2].set_color(RED)
using_subproblems.move_to(subproblems_needed.get_center())
self.play(
ReplacementTransform(subproblems_needed, using_subproblems)
)
self.wait(5)
self.play(
FadeOut(necessary_subproblems)
)
self.wait(2)
answer = TextMobject(
r"height$[(4, 5, 3)] = 3 + \text{max} \{ \text{height} [(2, 3, 2)], \text{height} [(2, 4, 1)], \text{height} [(1, 2, 2)] \}$",
r" $= 7$")
answer.scale(0.7)
answer[0][7:14].set_color(RED)
answer[0][16].set_color(RED)
answer[0][29:36].set_color(ORANGE)
answer[0][45:52].set_color(BLUE)
answer[0][61:68].set_color(VIOLET)
answer.move_to(necessary_subproblems.get_center())
self.play(
Write(answer[0][:15])
)
self.wait()
self.play(
Write(answer[0][15:]),
run_time=2
)
self.wait()
self.play(
Write(answer[1])
)
self.wait(10)
step_4 = TextMobject("4. Generalize the relationship")
step_4.scale(0.8)
step_4.move_to(step_3.get_center())
self.play(
FadeOut(box_graph),
FadeOut(using_subproblems),
FadeOut(answer),
FadeOut(subproblem),
FadeOut(dest_rectangle),
*[FadeOut(sr) for sr in subproblem_rects],
ReplacementTransform(step_3, step_4),
run_time=2
)
def connect_arrow_between(self, box1, box2, animate=True):
start_point = box1.get_right()
end_point = box2.get_left()
arrow = Arrow(start_point, end_point, tip_length=0.2)
arrow.set_color(GRAY)
if animate:
self.play(
ShowCreation(arrow)
)
return arrow
def construct_box(self, l=3, w=2, h=1, color=BLUE, label=True, label_direction=RIGHT):
box = Prism(dimensions=[l, w, h])
box.set_color(color)
box.set_stroke(WHITE, 2)
box.pose_at_angle()
if label:
label_text = TextMobject("({0}, {1}, {2})".format(*self.convert_to_input_dimensions((l, w, h))))
label_text.scale(1.8)
label_text.next_to(box, label_direction)
box = VGroup(box, label_text)
return box
# def make_stack_from_indices(self, boxes, indices):
# stack = []
# for i in indices:
# box = boxes[i].copy()
# stack.insert(0, box)
# for i in range(len(stack) - 1):
# self.put_box_on_top(stack[i + 1], stack[i])
# return VGroup(*stack)
def make_stack_of_boxes(self, boxes, colors):
stack = []
for i, dimension in enumerate(boxes):
l, w, h = dimension
box = self.construct_box(l=l, w=w, h=h, color=colors[i])
stack.append(box)
for i in range(len(stack) - 1):
self.put_box_on_top(stack[i + 1], stack[i])
return stack
def put_box_on_top(self, top, bottom):
display_length = bottom[0].dimensions[2]
top[0].next_to(bottom[0], UP, buff=0)
top[0].shift(DOWN * 0.25 * display_length)
top[1].next_to(top[0], RIGHT)
# display_height = bottom[0].dimensions[1]
# top.shift(UP * 0.7 * display_height)
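# A plain-Python sketch of the stacking algorithm that BoxProblemPart2
# animates further below: sort by length so every box that can sit on top of
# box i appears before it, then solve each subproblem in that order. It is a
# reference implementation of the recurrence
#     height[(L_i, W_i, H_i)] = H_i + max{height[(L_j, W_j, H_j)] |
#                                         box j can be stacked above box i}
# (assumes distinct boxes, since the dict is keyed by dimension tuples) and
# is not called by any scene.
def tallest_stack(boxes):
    boxes = sorted(boxes, key=lambda x: x[0])  # sort by length
    heights = {box: box[2] for box in boxes}   # base case: box by itself
    for i in range(1, len(boxes)):
        L_i, W_i, H_i = boxes[i]
        for j in range(i):
            L_j, W_j, _ = boxes[j]
            if L_j < L_i and W_j < W_i:  # box j can go on top of box i
                heights[boxes[i]] = max(heights[boxes[i]],
                                        H_i + heights[boxes[j]])
    return max(heights.values(), default=0)

# Example from the scenes:
#   tallest_stack([(4, 5, 3), (2, 3, 2), (3, 6, 2),
#                  (1, 5, 4), (2, 4, 1), (1, 2, 2)]) == 7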
class LIS(Scene):
def construct(self):
title = TextMobject("Longest Increasing Subsequence (LIS)")
title.scale(1.2)
title.shift(UP * 3.5)
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS - 1)
h_line.next_to(title, DOWN)
self.play(
Write(title),
ShowCreation(h_line)
)
self.wait(4)
problem = self.problem_statement(h_line)
self.show_examples(problem, h_line)
self.wait()
# array = [5, 2, 8, 6, 3, 6, 9, 7]
# # array = [8, 2, 9, 4, 5, 7, 3]
# graph, edge_dict = self.construct_graph(array)
# nodes, edges = self.make_graph_mobject(graph, edge_dict)
# entire_graph = VGroup(nodes, edges)
# entire_graph.move_to(ORIGIN)
# entire_graph.scale(0.8)
# entire_graph.shift(DOWN * 2)
# self.play(
# FadeIn(entire_graph)
# )
def problem_statement(self, h_line):
problem = TextMobject(
r"For a sequence $a_1, a_2, \ldots , a_n$, find the length of the" + "\\\\",
r"longest increasing subsequence $a_{i_1}, a_{i_2}, \ldots , a_{i_k}$" + "\\\\",
r"Constraints: $i_1 < i_2 < \cdots < i_k$; $a_{i_1} < a_{i_2} < \cdots < a_{i_k}$"
)
problem.scale(0.8)
problem.next_to(h_line, DOWN)
self.play(
Write(problem[0])
)
self.wait()
self.play(
Write(problem[1])
)
self.wait(3)
self.play(
Write(problem[2])
)
return problem
def show_examples(self, problem, h_line):
ex1 = TextMobject(r"LIS($\left[ 3 \quad 1 \quad 8 \quad 2 \quad 5 \right]$)", r"$\rightarrow 3$")
ex1[0][:3].set_color(MONOKAI_BLUE)
ex1[0][5:10].set_color(MONOKAI_PURPLE)
# ex1[7].set_color(MONOKAI_PURPLE)
# ex1[9].set_color(MONOKAI_PURPLE)
# ex1[11].set_color(MONOKAI_PURPLE)
# ex1[13].set_color(MONOKAI_PURPLE)
ex1.scale(0.8)
ex1.next_to(problem, DOWN)
self.wait(7)
self.play(
Write(ex1[0])
)
self.wait(2)
arrow_1 = CurvedArrow(
ex1[0][6].get_center() + DOWN * 0.2 + RIGHT * SMALL_BUFF,
ex1[0][8].get_center() + DOWN * 0.2 + LEFT * SMALL_BUFF,
tip_length=0.1
)
arrow_2 = CurvedArrow(
ex1[0][8].get_center() + DOWN * 0.2 + RIGHT * SMALL_BUFF,
ex1[0][9].get_center() + DOWN * 0.2 + LEFT * SMALL_BUFF,
tip_length=0.1
)
arrow_1.set_color(GREEN_SCREEN)
arrow_2.set_color(GREEN_SCREEN)
ex1[0][6].set_color(GREEN_SCREEN)
self.play(
ShowCreation(arrow_1)
)
ex1[0][8].set_color(GREEN_SCREEN)
self.play(
ShowCreation(arrow_2)
)
ex1[0][9].set_color(GREEN_SCREEN)
self.wait(2)
self.play(
Write(ex1[1])
)
self.wait(6)
ex2 = TextMobject(r"LIS($\left[ 5 \quad 2 \quad 8 \quad 6 \quad 3 \quad 6 \quad 9 \quad 5 \right]$)",
r"$\rightarrow 4$")
ex2.scale(0.8)
ex2.move_to(ex1.get_center())
ex2.shift(DOWN * 1)
ex2[0][:3].set_color(MONOKAI_BLUE)
ex2[0][5:13].set_color(MONOKAI_PURPLE)
self.play(
Write(ex2[0])
)
arrow_3 = CurvedArrow(
ex2[0][6].get_center() + DOWN * 0.2 + RIGHT * SMALL_BUFF,
ex2[0][9].get_center() + DOWN * 0.2 + LEFT * SMALL_BUFF,
tip_length=0.1
)
arrow_4 = CurvedArrow(
ex2[0][9].get_center() + DOWN * 0.2 + RIGHT * SMALL_BUFF,
ex2[0][10].get_center() + DOWN * 0.2 + LEFT * SMALL_BUFF,
tip_length=0.1
)
arrow_5 = CurvedArrow(
ex2[0][10].get_center() + DOWN * 0.2 + RIGHT * SMALL_BUFF,
ex2[0][11].get_center() + DOWN * 0.2 + LEFT * SMALL_BUFF,
tip_length=0.1
)
self.wait(4)
arrow_3.set_color(GREEN_SCREEN)
arrow_4.set_color(GREEN_SCREEN)
arrow_5.set_color(GREEN_SCREEN)
ex2[0][6].set_color(GREEN_SCREEN)
self.play(
ShowCreation(arrow_3)
)
ex2[0][9].set_color(GREEN_SCREEN)
self.play(
ShowCreation(arrow_4)
)
ex2[0][10].set_color(GREEN_SCREEN)
self.play(
ShowCreation(arrow_5)
)
ex2[0][11].set_color(GREEN_SCREEN)
self.play(
Write(ex2[1])
)
self.wait(5)
focus = TextMobject("We will focus on the length of the LIS")
focus[0][16:22].set_color(YELLOW)
focus.scale(0.8)
focus.move_to(DOWN * 1.5)
self.play(
Write(focus),
problem[0][-11:-5].set_color, YELLOW
)
self.wait(6)
step_1 = TextMobject("1. Visualize Examples")
step_1.scale(0.8)
step_1.move_to(DOWN * 3.5)
self.play(
Write(step_1)
)
self.wait(10)
self.play(
Indicate(problem[2])
)
self.wait()
self.play(
problem[2].set_color, YELLOW
)
self.wait()
self.wait(5)
to_fade = [
ex2,
arrow_3,
arrow_4,
arrow_5,
problem,
focus,
]
ex1_with_arrows = VGroup(ex1, arrow_1, arrow_2)
self.play(
ex1_with_arrows.shift, UP * 1.5,
*[FadeOut(obj) for obj in to_fade],
run_time=2
)
self.wait(5)
graph, edge_dict = self.construct_graph([3, 1, 8, 2, 5])
nodes, edges = self.make_graph_mobject(graph, edge_dict)
entire_graph = VGroup(nodes, edges)
entire_graph.move_to(ORIGIN)
transforms = []
for i in range(5, 10):
transform = TransformFromCopy(ex1[0][i], nodes[i - 5])
transforms.append(transform)
self.play(
*transforms,
run_time=2
)
self.wait(3)
self.play(
ShowCreation(edges),
rate_func=linear,
run_time=6
)
self.wait(5)
highlight_objs = []
circle = self.highlight_node(graph, 1)
self.play(
edge_dict[(1, 3)].set_color, GREEN_SCREEN
)
highlight_objs.append(circle)
circle = self.highlight_node(graph, 3)
self.play(
edge_dict[(3, 4)].set_color, GREEN_SCREEN
)
highlight_objs.append(circle)
circle = self.highlight_node(graph, 4)
highlight_objs.append(circle)
self.wait(7)
observation = TextMobject("LIS = Longest Path in DAG + 1")
observation.scale(0.8)
observation.next_to(step_1, UP)
self.play(
Write(observation)
)
self.wait(15)
step_2 = TextMobject("2. Find an appropriate subproblem")
step_2.scale(0.8)
step_2.move_to(step_1.get_center())
self.play(
ReplacementTransform(step_1, step_2)
)
self.wait(5)
self.play(
FadeOut(observation)
)
self.wait(9)
        subsets = TextMobject("All increasing subsequences are subsets of the original sequence.")
subsets.scale(0.8)
subsets.next_to(entire_graph, DOWN)
self.play(
Write(subsets)
)
self.wait(5)
start_end = TextMobject("All increasing subsequences have a start and end.")
start_end.scale(0.8)
start_end.next_to(subsets, DOWN)
start_end[0][-4:-1].set_color(ORANGE)
start_end[0][-12:-7].set_color(RED)
self.play(
Write(start_end)
)
self.wait()
start_rect = SurroundingRectangle(highlight_objs[0], buff=SMALL_BUFF, color=RED)
end_rect = SurroundingRectangle(highlight_objs[2], buff=SMALL_BUFF, color=ORANGE)
self.play(
ShowCreation(start_rect),
ShowCreation(end_rect)
)
self.wait(9)
self.play(
FadeOut(start_rect),
FadeOut(subsets),
start_end.move_to, subsets.get_center()
)
self.wait()
ending_focus = TextMobject("Let's focus on the end index of an increasing subsequence")
ending_focus.scale(0.8)
ending_focus.next_to(subsets, DOWN)
ending_focus[0][15:18].set_color(ORANGE)
self.play(
FadeIn(ending_focus)
)
self.play(
end_rect.move_to, nodes[3].get_center(),
run_time=1
)
self.play(
end_rect.move_to, nodes[2].get_center(),
run_time=1
)
self.play(
end_rect.move_to, nodes[1].get_center(),
run_time=1
)
self.play(
end_rect.move_to, nodes[0].get_center(),
run_time=1
)
self.wait(3)
self.play(
FadeOut(start_end),
FadeOut(ending_focus),
FadeOut(end_rect)
)
subproblem = TextMobject(r"Subproblem: LIS$\left[ k \right] = \text{LIS ending at index} \> k$")
subproblem.scale(0.8)
subproblem.move_to(observation.get_center())
self.play(
Write(subproblem)
)
self.wait(5)
example = TextMobject(r"LIS$\left[ 3 \right] =$", r"$\> 2$")
example.scale(0.8)
example.next_to(subproblem, UP)
subproblem_graph, other = self.get_subproblem_object(nodes, edge_dict, 3)
box = SurroundingRectangle(subproblem_graph, buff=SMALL_BUFF)
self.play(
Write(example[0])
)
self.play(
ShowCreation(box)
)
self.wait(4)
# self.play(
# FadeOut(other),
# FadeOut(highlight_objs[-1])
# )
self.play(
Write(example[1])
)
self.wait(12)
step_3 = TextMobject("3. Find relationships among subproblems")
step_3.scale(0.8)
step_3.move_to(step_2.get_center())
self.play(
edge_dict[(1, 3)].set_color, GRAY,
edge_dict[(3, 4)].set_color, GRAY,
ReplacementTransform(step_2, step_3),
FadeOut(box),
FadeOut(example),
FadeOut(ex1),
FadeOut(arrow_1),
FadeOut(arrow_2),
FadeOut(subproblem),
*[FadeOut(obj) for obj in highlight_objs],
run_time=2
)
self.wait()
subproblem.move_to(ex1.get_center())
self.play(
Write(subproblem)
)
self.wait(7)
surround_circle = self.highlight_node(graph, 4)
self.wait()
question = TextMobject(r"What subproblems are needed to solve LIS$\left[ 4 \right]$?")
question[0][-7:-1].set_color(GREEN_SCREEN)
question.scale(0.8)
question.next_to(entire_graph, DOWN)
self.play(
Write(question),
run_time=2
)
self.wait(6)
self.play(
edge_dict[(0, 4)].set_color, GREEN_SCREEN
)
self.wait()
subproblem_graph, other = self.get_subproblem_object(nodes, edge_dict, 0)
box = SurroundingRectangle(subproblem_graph, buff=SMALL_BUFF)
necessary_subproblems = TextMobject(r"LIS$\left[ 0 \right] = 1 \quad$", r"LIS$\left[ 1 \right] = 1 \quad$",
r"LIS$\left[ 3 \right] = 2$")
necessary_subproblems.scale(0.8)
necessary_subproblems.set_color(YELLOW)
necessary_subproblems.next_to(question, DOWN)
self.play(
ShowCreation(box),
)
self.wait(5)
self.play(
Write(necessary_subproblems[0])
)
self.wait(2)
subproblem_graph, other = self.get_subproblem_object(nodes, edge_dict, 1)
new_box = SurroundingRectangle(subproblem_graph, buff=SMALL_BUFF)
self.play(
edge_dict[(0, 4)].set_color, GRAY,
edge_dict[(1, 4)].set_color, GREEN_SCREEN,
Transform(box, new_box),
)
self.wait(2)
self.play(
Write(necessary_subproblems[1])
)
self.wait(4)
subproblem_graph, other = self.get_subproblem_object(nodes, edge_dict, 3)
new_box = SurroundingRectangle(subproblem_graph, buff=SMALL_BUFF)
self.play(
edge_dict[(1, 4)].set_color, GRAY,
edge_dict[(3, 4)].set_color, GREEN_SCREEN,
Transform(box, new_box),
)
self.wait()
self.play(
Write(necessary_subproblems[2])
)
self.wait(2)
# self.play(
# edge_dict[(3, 4)].set_color, GRAY,
# FadeOut(box),
# )
relationship = TextMobject(r"How do we use these subproblems to solve LIS$\left[ 4 \right]$?")
        relationship[0][-7:-1].set_color(GREEN_SCREEN)
relationship.scale(0.8)
relationship.move_to(question.get_center())
self.play(
ReplacementTransform(question, relationship)
)
self.wait()
answer = TextMobject(
r"LIS$\left[ 4 \right] = 1 + \text{max} \{ \text{LIS} \left[ 0 \right], \text{LIS} \left[ 1 \right], \text{LIS} \left[ 3 \right] \}$",
r" $= 3$")
answer.scale(0.8)
answer.set_color(YELLOW)
answer.move_to(necessary_subproblems.get_center())
self.play(
FadeOut(necessary_subproblems)
)
self.play(
Write(answer[0])
)
self.wait()
highlight_objs = []
highlight_objs.append(surround_circle)
circle = self.highlight_node(graph, 1)
self.play(
edge_dict[(1, 3)].set_color, GREEN_SCREEN
)
highlight_objs.append(circle)
circle = self.highlight_node(graph, 3)
self.play(
box.set_color, GREEN_SCREEN,
answer[0][-7:-1].set_color, GREEN_SCREEN
)
highlight_objs.append(circle)
self.play(
Write(answer[1])
)
self.wait(22)
step_4 = TextMobject("4. Generalize the relationship")
step_4.scale(0.8)
step_4.move_to(step_3.get_center())
self.play(
*[FadeOut(obj) for obj in highlight_objs],
FadeOut(entire_graph),
FadeOut(relationship),
FadeOut(answer),
FadeOut(subproblem),
FadeOut(box),
ReplacementTransform(step_3, step_4)
)
self.wait()
ex2 = TextMobject(r"$A = \left[ 5 \quad 2 \quad 8 \quad 6 \quad 3 \quad 6 \quad 9 \quad 5 \right]$")
ex2.scale(0.8)
ex2.move_to(ex1.get_center())
# ex2.shift(DOWN * 1)
ex2[0][2:11].set_color(MONOKAI_PURPLE)
arrow_3 = CurvedArrow(
ex2[0][4].get_center() + DOWN * 0.2 + RIGHT * SMALL_BUFF,
ex2[0][7].get_center() + DOWN * 0.2 + LEFT * SMALL_BUFF,
tip_length=0.1
)
arrow_4 = CurvedArrow(
ex2[0][7].get_center() + DOWN * 0.2 + RIGHT * SMALL_BUFF,
ex2[0][8].get_center() + DOWN * 0.2 + LEFT * SMALL_BUFF,
tip_length=0.1
)
arrow_5 = CurvedArrow(
ex2[0][8].get_center() + DOWN * 0.2 + RIGHT * SMALL_BUFF,
ex2[0][9].get_center() + DOWN * 0.2 + LEFT * SMALL_BUFF,
tip_length=0.1
)
arrow_3.set_color(GREEN_SCREEN)
arrow_4.set_color(GREEN_SCREEN)
arrow_5.set_color(GREEN_SCREEN)
ex2[0][4].set_color(GREEN_SCREEN)
ex2[0][7].set_color(GREEN_SCREEN)
ex2[0][8].set_color(GREEN_SCREEN)
ex2[0][9].set_color(GREEN_SCREEN)
self.play(
FadeIn(ex2[0]),
ShowCreation(arrow_3),
ShowCreation(arrow_4),
ShowCreation(arrow_5)
)
self.wait(2)
graph, edge_dict = self.construct_graph([5, 2, 8, 6, 3, 6, 9, 5], direction=UP, angle=-TAU / 4)
nodes, edges = self.make_graph_mobject(graph, edge_dict)
entire_graph = VGroup(nodes, edges)
scale_factor = 0.8
entire_graph.scale(scale_factor)
entire_graph.move_to(UP * 0.8)
self.play(
FadeIn(nodes)
)
self.wait()
question = TextMobject(r"How do we solve subproblem LIS$\left[ 5 \right]$?")
question[0][-7:-1].set_color(GREEN_SCREEN)
question.scale(scale_factor)
question.next_to(entire_graph, DOWN)
question.shift(DOWN * 0.8)
self.play(
Write(question)
)
self.wait()
surround_circle = self.highlight_node(graph, 5, scale_factor=scale_factor)
self.wait(2)
answer = TextMobject(r"LIS$\left[ 5 \right] = 1 + \text{max}\{ \text{LIS}[k] \mid k < 5, A[k] < A[5] \}$")
answer[0][:6].set_color(GREEN_SCREEN)
answer.scale(scale_factor)
answer.next_to(question, DOWN)
self.play(
Write(answer)
)
self.wait(8)
simplification = TextMobject(
r"$= 1 + \text{max} \{ \text{LIS} \left[ 0 \right] , \text{LIS} \left[ 1 \right] , \text{LIS} \left[ 4 \right] \}$")
simplification.scale(scale_factor)
simplification.next_to(answer, DOWN)
simplification.shift(RIGHT * 0.2)
self.play(
Write(simplification[0][:7])
)
arrow = Arrow(nodes[0].get_center() + DOWN * 1.2, nodes[0].get_center() + DOWN * 0.2)
arrow.set_color(GREEN_SCREEN)
k_equal = TextMobject("k = ")
val = Integer(0)
k_val = VGroup(k_equal, val).arrange_submobjects(RIGHT, buff=SMALL_BUFF * 2 + DOWN * SMALL_BUFF)
k_val.scale(0.8)
k_val.next_to(arrow, DOWN, buff=0)
tracker = VGroup(arrow, k_val)
self.play(
ShowCreation(tracker[0]),
Write(tracker[1])
)
self.wait(2)
box = SurroundingRectangle(nodes[:1], buff=SMALL_BUFF)
self.play(
ShowCreation(box),
ShowCreation(edge_dict[(0, 5)])
)
simplification[0][7:-1].set_color(YELLOW)
self.play(
Write(simplification[0][7:13])
)
self.wait(3)
shift_value = nodes[1].get_center() - nodes[0].get_center()
self.play(
tracker.shift, shift_value,
val.shift, shift_value,
val.increment_value,
FadeOut(box),
run_time=1
)
box = SurroundingRectangle(nodes[:2], buff=SMALL_BUFF)
self.play(
ShowCreation(box),
ShowCreation(edge_dict[(1, 5)])
)
self.play(
Write(simplification[0][13:20])
)
self.wait()
self.play(
tracker.shift, shift_value,
tracker[0].set_color, RED,
tracker[0].shift, shift_value,
val.shift, shift_value,
val.increment_value, 2,
FadeOut(box),
run_time=1
)
self.play(
tracker.shift, shift_value,
val.shift, shift_value,
val.increment_value, 3,
run_time=1
)
self.play(
tracker.shift, shift_value,
tracker[0].set_color, GREEN_SCREEN,
tracker[0].shift, shift_value,
val.shift, shift_value,
val.increment_value, 4,
run_time=1
)
box = SurroundingRectangle(nodes[:5], buff=SMALL_BUFF)
self.play(
ShowCreation(box),
ShowCreation(edge_dict[(4, 5)])
)
self.play(
Write(simplification[0][20:])
)
self.wait()
self.play(
FadeOut(box),
FadeOut(edge_dict[(0, 5)]),
FadeOut(edge_dict[(1, 5)]),
FadeOut(edge_dict[(4, 5)]),
FadeOut(surround_circle),
FadeOut(tracker),
FadeOut(simplification)
)
new_question = TextMobject(r"How do we solve subproblem LIS$\left[ n \right]$?")
new_question[0][-7:-1].set_color(BLUE)
new_question.scale(scale_factor)
new_question.next_to(entire_graph, DOWN)
new_question.shift(DOWN * 0.8)
self.play(
Transform(question, new_question)
)
self.wait(2)
new_answer = TextMobject(r"LIS$\left[ n \right] = 1 + \text{max}\{ \text{LIS}[k] \mid k < n, A[k] < A[n] \}$")
new_answer[0][:6].set_color(BLUE)
new_answer.scale(scale_factor)
new_answer.next_to(question, DOWN)
self.play(
Transform(answer, new_answer)
)
self.wait(15)
step_5 = TextMobject("5. Implement by solving subproblems in order")
step_5.scale(0.8)
step_5.move_to(step_4.get_center())
self.play(
ReplacementTransform(step_4, step_5)
)
self.play(
FadeOut(nodes),
FadeOut(question),
FadeOut(ex2),
FadeOut(arrow_3),
FadeOut(arrow_4),
FadeOut(arrow_5),
answer.move_to, problem.get_center() + UP * 0.5,
run_time=2
)
graph, edge_dict = self.construct_graph([5, 2, 8, 6, 3, 6, 9, 5])
nodes, edges = self.make_graph_mobject(graph, edge_dict)
entire_graph = VGroup(nodes, edges)
scale_factor = 0.55
entire_graph.scale(scale_factor)
entire_graph.move_to(DOWN * 2)
self.play(
ShowCreation(entire_graph),
run_time=2
)
self.wait(15)
arrow = Arrow(nodes[0].get_center() + LEFT * 0.5, nodes[-1].get_center() + RIGHT * 0.5)
arrow.set_color(GREEN_SCREEN)
arrow.next_to(entire_graph, UP, buff=SMALL_BUFF)
self.play(
ShowCreation(arrow)
)
self.wait(2)
code = self.generate_code()
code.next_to(answer, DOWN)
self.play(
Write(code[0])
)
self.wait(2)
self.play(
Write(code[1])
)
self.wait(6)
self.play(
Write(code[2])
)
self.wait(5)
self.play(
Write(code[3]),
FadeOut(arrow),
)
self.wait()
self.play(
Write(code[4]),
)
self.wait()
self.play(
Write(code[5])
)
self.wait(16)
self.play(
FadeOut(code),
FadeOut(entire_graph),
FadeOut(step_5),
FadeOut(answer)
)
self.wait()
        reminder = TextMobject("Note: this gives us the length of the LIS")
reminder.scale(0.8)
reminder.next_to(h_line, DOWN)
new_question = TextMobject("How do we actually get the sequence?")
new_question.scale(0.8)
new_question.move_to(reminder.get_center())
self.play(
Write(reminder)
)
graph, edge_dict = self.construct_graph([3, 1, 8, 2, 5])
nodes, edges = self.make_graph_mobject(graph, edge_dict)
entire_graph = VGroup(nodes, edges)
entire_graph.move_to(ORIGIN)
circle = self.highlight_node(graph, 1, animate=False)
edge_dict[(1, 3)].set_color(GREEN_SCREEN)
highlight_objs.append(circle)
circle = self.highlight_node(graph, 3, animate=False)
edge_dict[(3, 4)].set_color(GREEN_SCREEN)
highlight_objs.append(circle)
circle = self.highlight_node(graph, 4, animate=False)
highlight_objs.append(circle)
self.play(
FadeIn(entire_graph),
*[FadeIn(c) for c in highlight_objs]
)
self.wait(5)
self.play(
ReplacementTransform(reminder, new_question)
)
self.wait(5)
sequence_answer = TextMobject("Keep track of previous indices!")
sequence_answer.scale(0.8)
sequence_answer.next_to(new_question, DOWN)
self.play(
Write(sequence_answer)
)
self.wait(5)
i_text = TexMobject("i").scale(0.8)
j_text = TexMobject("j").scale(0.8)
i_text.next_to(nodes[4], UP)
j_text.next_to(nodes[3], UP)
prev_def = TextMobject(r"prev$[i] = j$")
prev_def.scale(0.8)
prev_def.next_to(entire_graph, DOWN)
self.play(
FadeIn(i_text),
Write(prev_def[0][:-1])
)
self.wait(5)
self.play(
FadeIn(j_text),
Write(prev_def[0][-1])
)
self.wait()
prev_def_meaning = TextMobject("Previous index used to solve LIS$[i]$ is index $j$")
prev_def_meaning.scale(0.8)
prev_def_meaning.next_to(prev_def, DOWN)
self.play(
Write(prev_def_meaning)
)
self.wait(7)
examples = []
prev_4 = TextMobject(r"prev$[4] = 3$")
prev_3 = TextMobject(r"prev$[3] = 1$")
prev_2 = TextMobject(r"prev$[2] = 0$")
prev_1 = TextMobject(r"prev$[1] = -1$")
prev_0 = TextMobject(r"prev$[0] = -1$")
examples.extend([prev_0, prev_1, prev_2, prev_3, prev_4])
examples_group = VGroup(*examples).arrange_submobjects(RIGHT, buff=SMALL_BUFF * 5)
examples_group.scale(0.7)
examples_group.next_to(prev_def_meaning, DOWN)
circle_0 = self.highlight_node(graph, 0, animate=False)
self.play(
Write(examples[0]),
*[FadeOut(obj) for obj in highlight_objs],
FadeOut(i_text),
FadeOut(j_text),
edge_dict[(1, 3)].set_color, GRAY,
edge_dict[(3, 4)].set_color, GRAY,
FadeIn(circle_0),
)
self.wait(5)
circle_1 = self.highlight_node(graph, 1, animate=False)
self.play(
Write(examples[1]),
FadeOut(circle_0),
FadeIn(circle_1)
)
self.wait(3)
circle_2 = self.highlight_node(graph, 2, animate=False)
self.play(
Write(examples[2]),
FadeOut(circle_1),
FadeIn(circle_2),
edge_dict[(0, 2)].set_color, GREEN_SCREEN,
)
self.wait(8)
circle_3 = self.highlight_node(graph, 3, animate=False)
self.play(
Write(examples[3]),
edge_dict[(0, 2)].set_color, GRAY,
edge_dict[(1, 3)].set_color, GREEN_SCREEN,
FadeOut(circle_2),
FadeIn(circle_3),
)
self.wait(3)
circle_4 = self.highlight_node(graph, 4, animate=False)
self.play(
Write(examples[4]),
edge_dict[(3, 4)].set_color, GREEN_SCREEN,
edge_dict[(1, 3)].set_color, GRAY,
FadeOut(circle_3),
FadeIn(circle_4),
)
self.wait(18)
def generate_code(self):
code_scale = 0.7
code = []
def_statement = TextMobject("def ", r"$\text{lis}(A):$")
def_statement[0].set_color(MONOKAI_BLUE)
def_statement[1][:3].set_color(MONOKAI_GREEN)
def_statement[1][4].set_color(MONOKAI_ORANGE)
def_statement.scale(code_scale)
def_statement.to_edge(LEFT)
line_1 = TextMobject(r"$L = [1]$ * len($A$)")
line_1.scale(code_scale)
line_1.next_to(def_statement, DOWN * 0.5)
line_1.to_edge(LEFT * 2)
line_1[0][5].shift(DOWN * SMALL_BUFF)
line_1[0][1].set_color(MONOKAI_PINK)
line_1[0][3].set_color(MONOKAI_PURPLE)
line_1[0][5].set_color(MONOKAI_PINK)
line_1[0][6:9].set_color(MONOKAI_BLUE)
code.extend([def_statement, line_1])
line_2 = TextMobject(r"for $i$ in range(1, len($L$)):")
line_2.scale(code_scale)
line_2.next_to(line_1, DOWN * 0.5)
line_2.to_edge(LEFT * 2)
line_2[0][:3].set_color(MONOKAI_PINK)
line_2[0][4:6].set_color(MONOKAI_PINK)
line_2[0][6:11].set_color(MONOKAI_BLUE)
line_2[0][12].set_color(MONOKAI_PURPLE)
line_2[0][14:17].set_color(MONOKAI_BLUE)
code.append(line_2)
line_3 = TextMobject(r"subproblems $= [L[k] \text{ for } k \text{ in range} (i) \text{ if } A[k] < A[i]]$")
line_3.scale(code_scale)
line_3.next_to(line_2, DOWN * 0.5)
line_3.to_edge(LEFT * 3)
line_3[0][11].set_color(MONOKAI_PINK)
line_3[0][17:20].set_color(MONOKAI_PINK)
line_3[0][21:23].set_color(MONOKAI_PINK)
line_3[0][23:28].set_color(MONOKAI_BLUE)
line_3[0][31:33].set_color(MONOKAI_PINK)
line_3[0][37].set_color(MONOKAI_PINK)
code.append(line_3)
line_4 = TextMobject(r"$L[i] = 1 + \text{max}$(subproblems, default$=$0)")
line_4.scale(code_scale)
line_4.next_to(line_3, DOWN * 0.5)
line_4.to_edge(LEFT * 3)
line_4[0][4].set_color(MONOKAI_PINK)
line_4[0][5].set_color(MONOKAI_PURPLE)
line_4[0][6].set_color(MONOKAI_PINK)
line_4[0][7:10].set_color(MONOKAI_BLUE)
line_4[0][23:30].set_color(MONOKAI_ORANGE)
line_4[0][30].set_color(MONOKAI_PINK)
line_4[0][31].set_color(MONOKAI_PURPLE)
code.append(line_4)
line_5 = TextMobject(r"return max($L$, default$=$0)")
line_5.scale(code_scale)
line_5[0][:6].set_color(MONOKAI_PINK)
line_5[0][6:9].set_color(MONOKAI_BLUE)
line_5[0][12:19].set_color(MONOKAI_ORANGE)
line_5[0][19].set_color(MONOKAI_PINK)
line_5[0][20].set_color(MONOKAI_PURPLE)
line_5.next_to(line_4, DOWN * 0.5)
line_5.to_edge(LEFT * 2)
code.append(line_5)
return VGroup(*code)
def get_subproblem_object(self, nodes, edge_dict, k):
subproblem = VGroup()
other = VGroup()
for i in range(len(nodes)):
if i <= k:
subproblem.add(nodes[i])
else:
other.add(nodes[i])
for key in edge_dict:
if key[1] <= k:
subproblem.add(edge_dict[key])
else:
other.add(edge_dict[key])
return subproblem, other
def construct_graph(self, sequence, direction=DOWN, angle=TAU / 4):
nodes = []
edges = {}
current = ORIGIN
radius, scale = 0.4, 0.9
for i in range(len(sequence)):
node = GraphNode(sequence[i], position=current, radius=radius, scale=scale)
nodes.append(node)
current = current + RIGHT * 1.5
for i in range(len(sequence)):
for j in range(len(sequence)):
if i < j and sequence[i] < sequence[j]:
if i % 2 == 0:
edges[(i, j)] = nodes[i].connect_curved_arrow(nodes[j], angle=-TAU / 4)
else:
edges[(i, j)] = nodes[i].connect_curved_arrow(nodes[j], direction=direction, angle=angle)
return nodes, edges
def make_graph_mobject(self, graph, edge_dict, node_color=DARK_BLUE_B,
stroke_color=BLUE, data_color=WHITE, edge_color=GRAY, scale_factor=1,
show_data=True):
nodes = []
edges = []
for node in graph:
node.circle.set_fill(color=node_color, opacity=0.5)
node.circle.set_stroke(color=stroke_color)
node.data.set_color(color=data_color)
if show_data:
nodes.append(VGroup(node.circle, node.data))
else:
nodes.append(node.circle)
for edge in edge_dict.values():
# edge.set_stroke(width=2*scale_factor)
edge.set_color(color=edge_color)
edges.append(edge)
return VGroup(*nodes), VGroup(*edges)
def highlight_node(self, graph, index, color=GREEN_SCREEN,
start_angle=TAU / 2, scale_factor=1, animate=True, run_time=1):
node = graph[index]
surround_circle = Circle(radius=node.circle.radius * scale_factor)
surround_circle.move_to(node.circle.get_center())
# surround_circle.scale(1.15)
surround_circle.set_stroke(width=8 * scale_factor)
surround_circle.set_color(color)
surround_circle.set_fill(opacity=0)
if animate:
self.play(
ShowCreation(surround_circle),
run_time=run_time
)
return surround_circle
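# Plain-Python reference for the two LIS ideas animated above (illustrative,
# not used by the scenes). lis_length implements the recurrence
#     LIS[n] = 1 + max{ LIS[k] | k < n, A[k] < A[n] },
# i.e. the longest path in the DAG plus one, and lis_sequence additionally
# records prev[i] (the previous index used to solve LIS[i]) so the actual
# subsequence can be reconstructed by walking the pointers backwards.
def lis_length(A):
    L = [1] * len(A)
    for i in range(1, len(L)):
        subproblems = [L[k] for k in range(i) if A[k] < A[i]]
        L[i] = 1 + max(subproblems, default=0)
    return max(L, default=0)

def lis_sequence(A):
    if not A:
        return []
    L = [1] * len(A)
    prev = [-1] * len(A)  # prev[i] == -1: the LIS ending at i starts there
    for i in range(1, len(A)):
        for k in range(i):
            if A[k] < A[i] and L[k] + 1 > L[i]:
                L[i] = L[k] + 1
                prev[i] = k
    i = max(range(len(A)), key=lambda idx: L[idx])  # index where the LIS ends
    out = []
    while i != -1:
        out.append(A[i])
        i = prev[i]
    return out[::-1]

# Examples from the scenes:
#   lis_length([3, 1, 8, 2, 5]) == 3
#   lis_sequence([5, 2, 8, 6, 3, 6, 9, 5]) == [2, 3, 6, 9]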
class BoxProblemPart2(BoxProblem):
def construct(self):
step_4 = TextMobject("4. Generalize the relationship")
step_4.scale(0.8)
step_4.move_to(DOWN * 3.5)
self.add(step_4)
title = TextMobject("Box Stacking")
title.scale(1.2)
title.shift(UP * 3.5)
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS - 1)
h_line.next_to(title, DOWN)
problem = TextMobject(
r"Given $n$ boxes $[ (L_1, W_1, H_1), (L_2, W_2, H_2), \ldots , (L_n, W_n, H_n) ]$ where box $i$ has" + "\\\\",
r"length $L_i$, width $W_i$, and height $H_i$, find the height of the tallest possible stack." + "\\\\",
r"Box $(L_i, W_i, H_i)$ can be on top of box $(L_j, W_j, H_j) \text{ if } L_i < L_j, W_i < W_j$."
)
problem.scale(0.6)
problem.next_to(h_line, DOWN)
first_example = TextMobject(r"$[(2, 3, 3), (2, 2, 4), (4, 4, 2)]$")
first_example.scale(0.6)
first_example.next_to(problem, DOWN)
box_dims = [
(4, 5, 3),
(2, 3, 2),
(3, 6, 2),
(1, 5, 4),
(2, 4, 1),
(1, 2, 2),
]
display_dims = [self.convert_to_display_dimesions(d) for d in box_dims]
scale_factor = 0.3
box_shapes = []
box_colors = [RED, ORANGE, YELLOW, GREEN_SCREEN, BLUE, VIOLET]
for i, display_dim in enumerate(display_dims):
l, w, h = display_dim
box = self.construct_box(l=l, w=w, h=h, color=box_colors[i])
box.scale(scale_factor)
box_shapes.append(box)
new_locations = [
RIGHT * 4 + DOWN * 2.5,
DOWN * 0.5,
RIGHT * 4 + DOWN * 0.5,
UP * 1.5,
DOWN * 2.5,
LEFT * 4 + DOWN * 1.5
]
for i in range(len(new_locations)):
box_shapes[i].move_to(new_locations[i])
edges = {}
edges[(1, 2)] = self.connect_arrow_between(box_shapes[1], box_shapes[2], animate=False)
edges[(1, 0)] = self.connect_arrow_between(box_shapes[1], box_shapes[0], animate=False)
edges[(3, 2)] = self.connect_arrow_between(box_shapes[3], box_shapes[2], animate=False)
edges[(4, 2)] = self.connect_arrow_between(box_shapes[4], box_shapes[2], animate=False)
edges[(4, 0)] = self.connect_arrow_between(box_shapes[4], box_shapes[0], animate=False)
edges[(5, 1)] = self.connect_arrow_between(box_shapes[5], box_shapes[1], animate=False)
edges[(5, 4)] = self.connect_arrow_between(box_shapes[5], box_shapes[4], animate=False)
edges[(5, 2)] = self.connect_arrow_between(box_shapes[5], box_shapes[2], animate=False)
edges[(5, 0)] = self.connect_arrow_between(box_shapes[5], box_shapes[0], animate=False)
box_nodes = VGroup(*box_shapes)
box_edges = VGroup(*[edges[key] for key in edges])
box_graph = VGroup(box_nodes, box_edges)
self.generalize_relationship(step_4, box_graph)
def generalize_relationship(self, step_4, box_graph):
self.wait()
box_dims = [
(5, 2, 1),
(3, 4, 1),
(5, 3, 3),
(2, 5, 3),
(2, 1, 2),
(4, 1, 5),
(4, 5, 1),
(4, 1, 2),
(2, 2, 4),
]
display_dims = [self.convert_to_display_dimesions(d) for d in box_dims]
# second_example = TextMobject(r"$[(4, 5, 3), (2, 3, 2), (3, 6, 2), (1, 5, 4), (2, 4, 1), (1, 2, 2)]$")
# second_example.scale(0.6)
# second_example.move_to(UP * 3)
# self.play(
# FadeIn(second_example)
# )
scale_factor = 0.3
box_shapes = []
box_colors = [RED, ORANGE, YELLOW, GOLD, GREEN_SCREEN, GREEN, BLUE, PURPLE, VIOLET]
for i, display_dim in enumerate(display_dims):
l, w, h = display_dim
box = self.construct_box(l=l, w=w, h=h, color=box_colors[i])
box.scale(scale_factor)
box_shapes.append(box)
box_shapes[0].move_to(ORIGIN)
box_shapes[0].shift(LEFT * 4.5 + DOWN * 0.5)
box_shapes[1].next_to(box_shapes[0], DOWN)
box_shapes[2].next_to(box_shapes[1], DOWN)
box_shapes[3].next_to(box_shapes[0], RIGHT)
box_shapes[4].next_to(box_shapes[3], DOWN)
box_shapes[5].next_to(box_shapes[4], DOWN)
box_shapes[6].next_to(box_shapes[3], RIGHT)
box_shapes[7].next_to(box_shapes[6], DOWN)
box_shapes[8].next_to(box_shapes[7], DOWN)
box_nodes = VGroup(*box_shapes)
box_nodes.move_to(UP * 1.5)
self.play(
FadeIn(box_nodes)
)
self.wait()
question = TextMobject(r"How to solve height[$(L_i, W_i, H_i)$] in general?")
question.scale(0.8)
question.next_to(box_nodes, DOWN)
self.play(
Write(question)
)
self.wait(2)
rect = SurroundingRectangle(box_shapes[6], buff=SMALL_BUFF, color=GREEN_SCREEN)
self.play(
question[0][17:27].set_color, BLUE,
ShowCreation(rect)
)
self.wait(3)
sub_step1 = TextMobject(r"1. Let $S$ be the set of all boxes that can be stacked above $(L_i, W_i, H_i)$")
sub_step1.scale(0.7)
sub_step1.next_to(question, DOWN)
sub_step1[0][-10:].set_color(BLUE)
sub_step2 = TextMobject(
r"2. height$[(L_i, W_i, H_i)] = H_i + \text{max} \{ \text{height} [(L_j, W_j, H_j)] \mid (L_j, W_j, H_j) \in S \}$")
sub_step2.scale(0.7)
sub_step2.next_to(sub_step1, DOWN)
sub_step2[0][9:19].set_color(BLUE)
sub_step2[0][21:23].set_color(BLUE)
sub_step2[0][24:].set_color(YELLOW)
surround_rects = [SurroundingRectangle(box_shapes[i], buff=SMALL_BUFF) for i in [1, 4, 8]]
self.play(
Write(sub_step1)
)
self.wait()
self.play(
*[ShowCreation(r) for r in surround_rects]
)
self.wait(3)
surround_rects.append(rect)
self.play(
Write(sub_step2)
)
self.wait(4)
step_5 = TextMobject("5. Implement by solving subproblems in order")
step_5.scale(0.8)
step_5.move_to(step_4.get_center())
self.play(
FadeOut(box_nodes),
*[FadeOut(r) for r in surround_rects],
FadeOut(question),
sub_step1.shift, UP * 5,
sub_step2.shift, UP * 5,
ReplacementTransform(step_4, step_5),
run_time=2
)
self.wait(2)
order_note = TextMobject("What order do we solve these subproblems?")
order_note.next_to(sub_step2, DOWN)
order_note.scale(0.8)
self.play(
Write(order_note)
)
box_graph.scale(0.8)
box_graph.move_to(DOWN * 0.5)
self.play(
FadeIn(box_graph)
)
self.wait(4)
orange_rect = SurroundingRectangle(box_graph[0][1], buff=SMALL_BUFF, color=ORANGE)
red_rect = SurroundingRectangle(box_graph[0][0], buff=SMALL_BUFF, color=RED)
        order_matters = TextMobject(
            r"Order matters (e.g. height$[(2, 3, 2)]$ must be solved before height$[(4, 5, 3)]$)")
        order_matters.scale(0.8)
        order_matters[0][24:31].set_color(ORANGE)
        order_matters[0][-9:-2].set_color(RED)
order_matters.move_to(order_note.get_center())
self.play(
ReplacementTransform(order_note, order_matters),
ShowCreation(orange_rect),
ShowCreation(red_rect)
)
self.wait(7)
enforcing_ordering = TextMobject("How do we ensure correct ordering if boxes are given in random order?")
enforcing_ordering.scale(0.8)
enforcing_ordering.move_to(order_matters.get_center())
self.play(
ReplacementTransform(order_matters, enforcing_ordering)
)
self.wait(21)
answer_ordering = TextMobject("We can sort the boxes by length or width first!")
answer_ordering.scale(0.8)
answer_ordering.move_to(enforcing_ordering.get_center())
self.play(
ReplacementTransform(enforcing_ordering, answer_ordering)
)
self.wait(4)
self.play(
FadeOut(box_graph),
FadeOut(orange_rect),
FadeOut(red_rect)
)
self.wait()
code = self.generate_code()
code.move_to(DOWN * 0.7)
self.play(
Write(code[0])
)
self.wait()
self.play(
Write(code[1])
)
self.wait(7)
self.play(
Write(code[2])
)
self.wait(11)
self.play(
Write(code[3])
)
self.wait(2)
self.play(
Write(code[4])
)
self.wait()
self.play(
Write(code[5])
)
self.wait(3)
self.play(
Write(code[8])
)
self.wait()
self.play(
Write(code[9])
)
self.wait(3)
self.play(
Write(code[6][0])
)
self.wait(2)
self.play(
Write(code[6][1])
)
self.wait()
self.play(
Write(code[6][2])
)
self.wait(6)
self.play(
Write(code[7])
)
self.wait(10)
self.play(
FadeOut(sub_step1),
FadeOut(sub_step2),
FadeOut(answer_ordering),
FadeOut(step_5),
code.scale, 0.8,
code.move_to, UP * 2
)
self.wait()
self.simulate_solution(code)
def simulate_solution(self, code):
box_dims = [
(4, 5, 3),
(2, 3, 2),
(3, 6, 2),
(1, 5, 4),
(2, 4, 1),
(1, 2, 2),
]
display_dims = [self.convert_to_display_dimesions(d) for d in box_dims]
scale_factor = 0.3
box_shapes = []
box_colors = [RED, ORANGE, YELLOW, GREEN_SCREEN, BLUE, VIOLET]
for i, display_dim in enumerate(display_dims):
l, w, h = display_dim
box = self.construct_box(l=l, w=w, h=h, color=box_colors[i], label_direction=UP)
box.scale(scale_factor)
box_shapes.append(box)
box_shapes[0].move_to(LEFT * 5 + DOWN * 2)
box_shapes[1].next_to(box_shapes[0], RIGHT)
box_shapes[2].next_to(box_shapes[1], RIGHT)
box_shapes[3].next_to(box_shapes[2], RIGHT)
box_shapes[4].next_to(box_shapes[3], RIGHT)
box_shapes[5].next_to(box_shapes[4], RIGHT)
box_group = VGroup(*box_shapes)
box_group.move_to(DOWN * 2)
self.play(
FadeIn(box_group)
)
self.wait()
code_arrow = Arrow(ORIGIN, RIGHT * 1.2)
code_arrow.set_color(GREEN_SCREEN)
code_arrow.next_to(code[1], LEFT * 0.5)
self.play(
ShowCreation(code_arrow)
)
self.wait()
sorted_box_dims = sorted(box_dims, key=lambda x: x[0])
sorted_display_dims = [self.convert_to_display_dimesions(d) for d in sorted_box_dims]
scale_factor = 0.3
sorted_box_shapes = []
box_colors = [GREEN_SCREEN, VIOLET, ORANGE, BLUE, YELLOW, RED]
for i, display_dim in enumerate(sorted_display_dims):
l, w, h = display_dim
box = self.construct_box(l=l, w=w, h=h, color=box_colors[i], label_direction=UP)
box.scale(scale_factor)
sorted_box_shapes.append(box)
sorted_box_shapes[0].move_to(LEFT * 5 + DOWN * 2)
sorted_box_shapes[1].next_to(sorted_box_shapes[0], RIGHT * 1.2)
sorted_box_shapes[2].next_to(sorted_box_shapes[1], RIGHT * 1.2)
sorted_box_shapes[3].next_to(sorted_box_shapes[2], RIGHT * 1.2)
sorted_box_shapes[4].next_to(sorted_box_shapes[3], RIGHT * 1.2)
sorted_box_shapes[5].next_to(sorted_box_shapes[4], RIGHT * 1.2)
sorted_box_group = VGroup(*sorted_box_shapes)
sorted_box_group.move_to(DOWN * 1.5)
map_index = [5, 2, 4, 0, 3, 1]
self.play(
*[ReplacementTransform(box_group[i],
sorted_box_group[map_index[i]]) for i in range(len(box_group))],
run_time=3
)
self.wait()
self.play(
code_arrow.next_to, code[2], LEFT * 0.5
)
self.wait()
height_text = TextMobject("heights:")
height_text.scale(0.8)
height_text.next_to(sorted_box_shapes[0], LEFT)
heights = [Integer(h).scale(0.8) for _, _, h in sorted_box_dims]
for i, h in enumerate(heights):
h.next_to(sorted_box_group[i], UP)
for i in range(1, len(heights)):
heights[i].move_to(RIGHT * heights[i].get_center()[0] + UP * heights[0].get_center()[1])
height_text.move_to(RIGHT * height_text.get_center()[0] + UP * heights[0].get_center()[1])
self.play(
*[FadeIn(h) for h in heights],
FadeIn(height_text)
)
self.wait(2)
self.play(
code_arrow.next_to, code[3], LEFT * 0.5
)
i_arrow = Arrow(DOWN * 3.2, DOWN * 2)
i_arrow.next_to(sorted_box_shapes[1], DOWN)
i_arrow.set_color(WHITE)
i_equal = TextMobject("i = ")
i_val = Integer(1)
i_val = VGroup(i_equal, i_val).arrange_submobjects(RIGHT, buff=SMALL_BUFF * 2 + DOWN * SMALL_BUFF)
i_val.scale(0.7)
i_val.next_to(i_arrow, DOWN, buff=0)
i_indicator = VGroup(i_arrow, i_val)
self.play(
ShowCreation(i_arrow),
FadeIn(i_val)
)
self.wait()
self.play(
code_arrow.next_to, code[4], LEFT * 0.5
)
self.wait(3)
self.play(
code_arrow.next_to, code[5], LEFT * 0.5
)
j_arrow = Arrow(DOWN * 3.2, DOWN * 2)
j_arrow.next_to(sorted_box_shapes[0], DOWN)
j_arrow.set_color(YELLOW)
j_equal = TextMobject("j = ")
j_val = Integer(0)
j_val = VGroup(j_equal, j_val).arrange_submobjects(RIGHT, buff=SMALL_BUFF * 2 + DOWN * SMALL_BUFF)
j_val.scale(0.7)
j_val.set_color(YELLOW)
j_val.next_to(j_arrow, DOWN, buff=0)
j_indicator = VGroup(j_arrow, j_val)
self.play(
ShowCreation(j_arrow),
FadeIn(j_val)
)
self.wait(7)
self.play(
code_arrow.next_to, code[6], LEFT * 0.5
)
self.wait(6)
self.play(
code_arrow.next_to, code[3], LEFT * 0.5
)
self.wait()
self.move_tracker(i_indicator, sorted_box_shapes, 2, start=1)
self.play(
code_arrow.next_to, code[4], LEFT * 0.5
)
self.wait()
self.play(
code_arrow.next_to, code[5], LEFT * 0.5
)
surround_rects = []
rect = SurroundingRectangle(sorted_box_shapes[1], buff=SMALL_BUFF)
surround_rects.append(rect)
S = TextMobject("S").scale(0.8)
S.next_to(sorted_box_shapes[0], LEFT)
S.shift(LEFT * 0.5)
surround_S = SurroundingRectangle(S, buff=SMALL_BUFF)
self.wait()
self.play(
FadeIn(S),
FadeIn(surround_S),
)
self.wait()
self.move_tracker(j_indicator, sorted_box_shapes, 1)
self.play(
ShowCreation(rect)
)
self.wait(3)
self.play(
code_arrow.next_to, code[6], LEFT * 0.5
)
self.wait(11)
self.play(
heights[2].increment_value, 2
)
self.play(
code_arrow.next_to, code[3], LEFT * 0.5
)
self.wait()
self.move_tracker(i_indicator, sorted_box_shapes, 3, start=1)
self.play(
*[FadeOut(r) for r in surround_rects],
code_arrow.next_to, code[4], LEFT * 0.5,
)
self.wait(3)
self.play(
code_arrow.next_to, code[5], LEFT * 0.5
)
self.move_tracker(j_indicator, sorted_box_shapes, 0)
self.wait()
self.move_tracker(j_indicator, sorted_box_shapes, 1)
self.play(
ShowCreation(surround_rects[0])
)
self.move_tracker(j_indicator, sorted_box_shapes, 2)
self.play(
code_arrow.next_to, code[6], LEFT * 0.5
)
self.wait()
self.play(
heights[3].increment_value, 2
)
self.play(
code_arrow.next_to, code[3], LEFT * 0.5
)
self.move_tracker(i_indicator, sorted_box_shapes, 4, start=1)
self.play(
*[FadeOut(r) for r in surround_rects],
code_arrow.next_to, code[4], LEFT * 0.5,
)
self.wait(2)
self.play(
code_arrow.next_to, code[5], LEFT * 0.5
)
self.wait()
self.move_tracker(j_indicator, sorted_box_shapes, 0)
new_rect = SurroundingRectangle(sorted_box_shapes[0], buff=SMALL_BUFF)
self.play(
ShowCreation(new_rect)
)
surround_rects.insert(0, new_rect)
self.move_tracker(j_indicator, sorted_box_shapes, 1)
self.play(
ShowCreation(surround_rects[1])
)
self.move_tracker(j_indicator, sorted_box_shapes, 2)
new_rect = SurroundingRectangle(sorted_box_shapes[2], buff=SMALL_BUFF)
self.play(
ShowCreation(new_rect)
)
surround_rects.append(new_rect)
self.move_tracker(j_indicator, sorted_box_shapes, 3)
new_rect = SurroundingRectangle(sorted_box_shapes[3], buff=SMALL_BUFF)
self.play(
ShowCreation(new_rect)
)
surround_rects.append(new_rect)
self.play(
code_arrow.next_to, code[6], LEFT * 0.5
)
self.wait(5)
self.play(
heights[4].increment_value, 4
)
self.wait()
self.play(
code_arrow.next_to, code[3], LEFT * 0.5
)
self.wait()
self.move_tracker(i_indicator, sorted_box_shapes, 5, start=1)
self.play(
*[FadeOut(r) for r in surround_rects],
code_arrow.next_to, code[4], LEFT * 0.5,
)
self.play(
code_arrow.next_to, code[5], LEFT * 0.5
)
self.move_tracker(j_indicator, sorted_box_shapes, 0)
self.move_tracker(j_indicator, sorted_box_shapes, 1)
self.play(
ShowCreation(surround_rects[1])
)
self.move_tracker(j_indicator, sorted_box_shapes, 2)
self.play(
ShowCreation(surround_rects[2])
)
self.move_tracker(j_indicator, sorted_box_shapes, 3)
self.play(
ShowCreation(surround_rects[3])
)
self.move_tracker(j_indicator, sorted_box_shapes, 4)
self.play(
code_arrow.next_to, code[6], LEFT * 0.5
)
self.wait(9)
self.play(
heights[5].increment_value, 4
)
self.wait(5)
self.play(
code_arrow.next_to, code[7], LEFT * 0.5
)
self.play(
Indicate(heights[5]),
)
self.play(
Indicate(heights[5])
)
self.play(
heights[5].set_color, YELLOW
)
self.wait(3)
self.play(
*[FadeOut(surround_rects[i]) for i in range(1, 4)],
FadeOut(code_arrow),
FadeOut(i_indicator),
FadeOut(j_indicator),
FadeOut(S),
FadeOut(surround_S)
)
self.wait(23)
def move_tracker(self, indicator, sorted_box_shapes, j, start=0):
copy = indicator.copy()
current = copy.get_center()
copy.next_to(sorted_box_shapes[j], DOWN)
shift_amount = copy.get_center() - current
self.play(
indicator.next_to, sorted_box_shapes[j], DOWN,
indicator[1][1].increment_value, j - start,
indicator[1][1].shift, shift_amount,
)
def generate_code(self):
code_scale = 0.7
code = []
def_statement = TextMobject("def ", r"$\text{tallestStack}$(boxes):")
def_statement[0].set_color(MONOKAI_BLUE)
def_statement[1][:12].set_color(MONOKAI_GREEN)
def_statement[1][13:18].set_color(MONOKAI_ORANGE)
def_statement.scale(code_scale)
def_statement.to_edge(LEFT)
code.append(def_statement)
line_1 = TextMobject(r"boxes.sort(key$=$lambda x: x$[0]$)")
line_1.scale(code_scale)
line_1.next_to(def_statement, DOWN * 0.5)
line_1.to_edge(LEFT * 2)
# line_1[0][5].shift(DOWN * SMALL_BUFF)
line_1[0][6:10].set_color(MONOKAI_BLUE)
line_1[0][11:14].set_color(MONOKAI_ORANGE)
line_1[0][14].set_color(MONOKAI_PINK)
line_1[0][15:21].set_color(MONOKAI_BLUE)
line_1[0][-3].set_color(MONOKAI_PURPLE)
code.append(line_1)
line_2 = TextMobject(r"heights $= \{ \text{box:box}[2] \text{ for box in boxes}\}$")
line_2.scale(code_scale)
line_2.next_to(line_1, DOWN * 0.5)
line_2.to_edge(LEFT * 2)
line_2[0][7].set_color(MONOKAI_PINK)
line_2[0][17].set_color(MONOKAI_PURPLE)
line_2[0][19:22].set_color(MONOKAI_PINK)
line_2[0][25:27].set_color(MONOKAI_PINK)
code.append(line_2)
line_3 = TextMobject(r"for i in range(1, len(boxes)):")
line_3.scale(code_scale)
line_3.next_to(line_2, DOWN * 0.5)
line_3.to_edge(LEFT * 2)
line_3[0][:3].set_color(MONOKAI_PINK)
line_3[0][4:6].set_color(MONOKAI_PINK)
line_3[0][6:11].set_color(MONOKAI_BLUE)
line_3[0][12].set_color(MONOKAI_PURPLE)
line_3[0][14:17].set_color(MONOKAI_BLUE)
code.append(line_3)
line_4 = TextMobject(r"box $= \text{boxes}[\text{i}]$")
line_4.scale(code_scale)
line_4.next_to(line_3, DOWN * 0.5)
line_4.to_edge(LEFT * 3)
line_4[0][3].set_color(MONOKAI_PINK)
code.append(line_4)
line_5 = TextMobject(
r"S $= [\text{boxes}[\text{j}] \text{ for j in range(i) if canBeStacked(boxes}[\text{j}] \text{, box)}]$")
line_5.scale(code_scale)
line_5[0][1].set_color(MONOKAI_PINK)
line_5[0][11:14].set_color(MONOKAI_PINK)
line_5[0][15:17].set_color(MONOKAI_PINK)
line_5[0][17:22].set_color(MONOKAI_BLUE)
line_5[0][25:27].set_color(MONOKAI_PINK)
line_5[0][27:39].set_color(MONOKAI_BLUE)
line_5.next_to(line_4, DOWN * 0.5)
line_5.to_edge(LEFT * 3)
code.append(line_5)
line_6 = TextMobject(r"$\text{heights}[\text{box}] = $", r"$\text{ box}[2] \text{ } + $",
r"$\text{ max}([\text{heights}[\text{box}] \text{ for box in S}]$, default$=$0)")
line_6.scale(code_scale)
line_6[0][12].set_color(MONOKAI_PINK)
line_6[1][4].set_color(MONOKAI_PURPLE)
line_6[1][6].set_color(MONOKAI_PINK)
line_6[2][:3].set_color(MONOKAI_BLUE)
line_6[2][17:20].set_color(MONOKAI_PINK)
line_6[2][23:25].set_color(MONOKAI_PINK)
line_6[2][28:35].set_color(MONOKAI_ORANGE)
line_6[2][35].set_color(MONOKAI_PINK)
line_6[2][36].set_color(MONOKAI_PURPLE)
line_6.next_to(line_5, DOWN * 0.5)
line_6.to_edge(LEFT * 3)
code.append(line_6)
line_7 = TextMobject(r"return max(heights.values(), default$=$0)")
line_7.scale(code_scale)
line_7[0][:6].set_color(MONOKAI_PINK)
line_7[0][6:9].set_color(MONOKAI_BLUE)
line_7[0][18:24].set_color(MONOKAI_BLUE)
line_7[0][27:34].set_color(MONOKAI_ORANGE)
line_7[0][34].set_color(MONOKAI_PINK)
line_7[0][35].set_color(MONOKAI_PURPLE)
line_7.next_to(line_6, DOWN * 0.5)
line_7.to_edge(LEFT * 2)
code.append(line_7)
line_8 = TextMobject(r"def canBeStacked(top, bottom):")
line_8.scale(code_scale)
line_8[0][:3].set_color(MONOKAI_BLUE)
line_8[0][3:15].set_color(MONOKAI_GREEN)
line_8[0][16:19].set_color(MONOKAI_ORANGE)
line_8[0][20:26].set_color(MONOKAI_ORANGE)
line_8.next_to(line_7, DOWN * 0.5)
line_8.to_edge(LEFT)
code.append(line_8)
line_9 = TextMobject(r"return top$[0] <$ bottom$[0]$ and top$[1] <$ bottom$[1]$")
line_9.scale(code_scale)
line_9[0][:6].set_color(MONOKAI_PINK)
line_9[0][10].set_color(MONOKAI_PURPLE)
line_9[0][12].set_color(MONOKAI_PINK)
line_9[0][20].set_color(MONOKAI_PURPLE)
line_9[0][22:25].set_color(MONOKAI_PINK)
line_9[0][29].set_color(MONOKAI_PURPLE)
line_9[0][31].set_color(MONOKAI_PINK)
line_9[0][-2].set_color(MONOKAI_PURPLE)
line_9.next_to(line_8, DOWN * 0.5)
line_9.to_edge(LEFT * 2)
code.append(line_9)
return VGroup(*code)
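
# Plain-Python transcription of the algorithm that generate_code renders above
# (a reference sketch, not used by the scene). Assumption: each box is a
# (width, depth, height) tuple, so it is hashable and can key the heights dict.
def tallestStack(boxes):
    boxes.sort(key=lambda x: x[0])
    heights = {box: box[2] for box in boxes}
    for i in range(1, len(boxes)):
        box = boxes[i]
        S = [boxes[j] for j in range(i) if canBeStacked(boxes[j], box)]
        heights[box] = box[2] + max([heights[b] for b in S], default=0)
    return max(heights.values(), default=0)

def canBeStacked(top, bottom):
    return top[0] < bottom[0] and top[1] < bottom[1]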
class DPConclusion(Scene):
def construct(self):
title = TextMobject("Finding Subproblems")
title.scale(1.2)
title.shift(UP * 3.5)
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS - 1)
h_line.next_to(title, DOWN)
self.play(
Write(title),
ShowCreation(h_line)
)
self.types_of_subprobs(title, h_line)
def types_of_subprobs(self, title, h_line):
self.wait(5)
common_subproblems = TextMobject("Common Subproblems")
common_subproblems.next_to(h_line, DOWN)
self.play(
Write(common_subproblems)
)
self.wait(7)
grid = self.create_grid(1, 9, 1)
grid_group = self.get_group_grid(grid)
for i in range(1, 10):
text = TextMobject(r"$x_{0}$".format(i)).scale(0.8)
text.move_to(grid[0][i - 1].get_center())
grid_group.add(text)
grid_group.move_to(ORIGIN)
input_problem = TextMobject(r"Input: $x_1, x_2, \ldots , x_n$")
input_problem.scale(0.8)
input_problem.next_to(grid_group, UP)
self.play(
FadeIn(grid_group),
FadeIn(input_problem)
)
self.wait()
subproblem = TextMobject(r"Subproblem: $x_1, x_2, \ldots , x_i$")
subproblem.scale(0.8)
subproblem.next_to(grid_group, DOWN)
self.play(
Write(subproblem),
grid_group[0].set_fill, BLUE, 0.3,
grid_group[1].set_fill, BLUE, 0.3,
grid_group[2].set_fill, BLUE, 0.3,
grid_group[3].set_fill, BLUE, 0.3,
grid_group[4].set_fill, BLUE, 0.3,
grid_group[5].set_fill, BLUE, 0.3,
)
self.wait(6)
self.play(
input_problem.shift, UP * 1,
grid_group.shift, UP * 1,
subproblem.shift, UP * 1
)
self.wait(5)
random_input = TextMobject(r"Input: $x_1, x_2, \ldots , x_n$ in random order")
random_input.scale(0.8)
random_input.move_to(DOWN)
random_order = list(range(9))
random.seed(2)
random.shuffle(random_order)
text_x = {}
random_grid = self.create_grid(1, 9, 1)
random_grid_group = self.get_group_grid(random_grid)
random_grid_group.next_to(random_input, DOWN)
for i in range(1, 10):
text = TextMobject(r"$x_{0}$".format(i)).scale(0.8)
text.move_to(random_grid[0][random_order[i - 1]].get_center())
text_x[random_order[i - 1]] = text
random_grid_group.add(text)
self.play(
FadeIn(random_input),
FadeIn(random_grid_group)
)
original_order = list(range(9))
mapping = [(i, random_order.index(i)) for i in range(9)]
sorting = [ApplyMethod(
text_x[i].move_to,
random_grid[0][j].get_center()) for i, j in mapping]
self.wait(3)
self.play(
*sorting,
run_time=2
)
self.wait()
random_subproblem = TextMobject(r"Subproblem: $x_1, x_2, \ldots , x_i$ after sorting")
random_subproblem.scale(0.8)
random_subproblem.next_to(random_grid_group, DOWN)
self.play(
Write(random_subproblem),
random_grid_group[0].set_fill, BLUE, 0.3,
random_grid_group[1].set_fill, BLUE, 0.3,
random_grid_group[2].set_fill, BLUE, 0.3,
random_grid_group[3].set_fill, BLUE, 0.3,
random_grid_group[4].set_fill, BLUE, 0.3,
random_grid_group[5].set_fill, BLUE, 0.3,
)
self.wait(4)
self.play(
FadeOut(grid_group),
FadeOut(random_grid_group),
FadeOut(random_subproblem),
FadeOut(random_input),
FadeOut(input_problem),
FadeOut(subproblem)
)
self.wait(3)
grid = self.create_grid(1, 9, 1)
grid_group = self.get_group_grid(grid)
for i in range(1, 10):
text = TextMobject(r"$x_{0}$".format(i)).scale(0.8)
text.move_to(grid[0][i - 1].get_center())
grid_group.add(text)
grid_group.move_to(UP * 0.5)
y_grid = self.create_grid(1, 8, 1)
y_grid_group = self.get_group_grid(y_grid)
y_grid_group.next_to(grid_group, DOWN)
for i in range(1, 9):
text = TextMobject(r"$y_{0}$".format(i)).scale(0.8)
text.move_to(y_grid[0][i - 1].get_center())
y_grid_group.add(text)
input_problem = TextMobject(r"Input: $x_1, x_2, \ldots , x_n$ and $y_1, y_2, \ldots , y_m$")
input_problem.scale(0.8)
input_problem.next_to(grid_group, UP)
subproblem = TextMobject(r"Subproblem: $x_1, x_2, \ldots , x_i$ and $y_1, y_2, \ldots , y_j$")
subproblem.scale(0.8)
subproblem.next_to(y_grid_group, DOWN)
y_grid_group.shift(LEFT * 0.5)
self.play(
FadeIn(grid_group),
FadeIn(y_grid_group),
FadeIn(input_problem)
)
self.wait(3)
self.play(
Write(subproblem),
grid_group[0].set_fill, BLUE, 0.3,
grid_group[1].set_fill, BLUE, 0.3,
grid_group[2].set_fill, BLUE, 0.3,
grid_group[3].set_fill, BLUE, 0.3,
grid_group[4].set_fill, BLUE, 0.3,
grid_group[5].set_fill, BLUE, 0.3,
y_grid_group[0].set_fill, BLUE, 0.3,
y_grid_group[1].set_fill, BLUE, 0.3,
y_grid_group[2].set_fill, BLUE, 0.3,
y_grid_group[3].set_fill, BLUE, 0.3,
y_grid_group[4].set_fill, BLUE, 0.3,
)
self.wait(5)
self.play(
FadeOut(input_problem),
FadeOut(grid_group),
FadeOut(y_grid_group),
FadeOut(subproblem)
)
grid = self.create_grid(1, 9, 1)
grid_group = self.get_group_grid(grid)
for i in range(1, 10):
text = TextMobject(r"$x_{0}$".format(i)).scale(0.8)
text.move_to(grid[0][i - 1].get_center())
grid_group.add(text)
grid_group.move_to(ORIGIN)
input_problem = TextMobject(r"Input: $x_1, x_2, \ldots , x_n$")
input_problem.scale(0.8)
input_problem.next_to(grid_group, UP)
self.play(
FadeIn(grid_group),
FadeIn(input_problem)
)
self.wait(3)
subproblem = TextMobject(r"Subproblem: $x_i, x_{i + 1}, \ldots , x_j$")
subproblem.scale(0.8)
subproblem.next_to(grid_group, DOWN)
self.play(
Write(subproblem),
grid_group[2].set_fill, BLUE, 0.3,
grid_group[3].set_fill, BLUE, 0.3,
grid_group[4].set_fill, BLUE, 0.3,
grid_group[5].set_fill, BLUE, 0.3,
grid_group[6].set_fill, BLUE, 0.3,
)
self.wait(5)
self.play(
FadeOut(subproblem),
FadeOut(grid_group),
FadeOut(input_problem)
)
self.wait(4)
grid = self.create_grid(6, 8, 0.7)
grid_group = self.get_group_grid(grid)
grid_group.move_to(DOWN * 0.5)
input_problem = TextMobject(r"Input: matrix $A_{mn}$")
input_problem.scale(0.8)
input_problem.next_to(grid_group, UP)
self.play(
FadeIn(grid_group),
FadeIn(input_problem)
)
self.wait(5)
subproblem = TextMobject(r"Subproblem: matrix $A_{ij}$")
subproblem.scale(0.8)
subproblem.next_to(grid_group, DOWN)
coords = []
for i in range(7):
for j in range(5):
coords.append((j, i))
self.play(
Write(subproblem),
*[ApplyMethod(grid[i][j].set_fill, BLUE, 0.3) for i, j in coords]
)
self.wait(12)
frame_rect = ScreenRectangle(height=5)
frame_rect.move_to(DOWN * 0.1)
self.play(
FadeOut(common_subproblems),
FadeOut(input_problem),
FadeOut(subproblem),
ReplacementTransform(grid_group, frame_rect),
run_time=2
)
        practice = TextMobject("Practice and experience are the key!")
practice.next_to(frame_rect, DOWN)
self.play(
Write(practice)
)
self.wait(7)
def create_grid(self, rows, columns, square_length):
left_corner = Square(side_length=square_length)
grid = []
first_row = [left_corner]
for i in range(columns - 1):
square = Square(side_length=square_length)
square.next_to(first_row[i], RIGHT, buff=0)
first_row.append(square)
grid.append(first_row)
for i in range(rows - 1):
prev_row = grid[i]
# print(prev_row)
new_row = []
for square in prev_row:
# print(square)
square_below = Square(side_length=square_length)
square_below.next_to(square, DOWN, buff=0)
new_row.append(square_below)
grid.append(new_row)
return grid
def get_group_grid(self, grid):
squares = []
for row in grid:
for square in row:
squares.append(square)
return VGroup(*squares)
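
# Illustrative companion to the "Subproblem: x_1 ... x_i" prefix pattern shown
# above (a sketch, not used by any scene): longest increasing subsequence,
# where the answer at index i depends only on answers for shorter prefixes.
def longest_increasing_subsequence(xs):
    best = [1] * len(xs)  # best[i]: length of the longest increasing subsequence ending at xs[i]
    for i in range(1, len(xs)):
        for j in range(i):
            if xs[j] < xs[i]:
                best[i] = max(best[i], best[j] + 1)
    return max(best, default=0)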
class Thumbnail(Scene):
def construct(self):
grid = self.create_grid(5, 5, 1)
grid_group = self.get_group_grid(grid)
grid_group.move_to(DOWN * 0.5)
self.play(
FadeIn(grid_group)
)
title = TextMobject("Dynamic Programming")
title.scale(1.7)
title.next_to(grid_group, UP * 1.5)
self.play(
FadeIn(title)
)
color_fill = DARK_BLUE_B
color_border = GREEN_SCREEN
color_flash = BLUE
self.play(
grid[0][0].set_fill, color_fill, 1,
grid[0][1].set_fill, color_fill, 1,
grid[1][1].set_fill, color_fill, 1,
grid[1][2].set_fill, color_fill, 1,
grid[2][2].set_fill, color_fill, 1,
grid[3][2].set_fill, color_fill, 1,
grid[3][3].set_fill, color_fill, 1,
grid[3][4].set_fill, color_fill, 1,
grid[4][4].set_fill, color_fill, 1,
)
group = VGroup(
grid[0][0],
grid[0][1],
grid[1][1],
grid[1][2],
grid[2][2],
grid[3][2],
grid[3][3],
grid[3][4],
grid[4][4],
)
group.set_stroke(color=color_border, width=10)
self.play(
FadeIn(group)
)
self.play(
Flash(grid[0][0], color=color_flash, num_lines=10, line_stroke_width=5),
Flash(grid[0][1], color=color_flash, num_lines=10, line_stroke_width=5),
Flash(grid[1][1], color=color_flash, num_lines=10, line_stroke_width=5),
Flash(grid[1][2], color=color_flash, num_lines=10, line_stroke_width=5),
Flash(grid[2][2], color=color_flash, num_lines=10, line_stroke_width=5),
Flash(grid[3][2], color=color_flash, num_lines=10, line_stroke_width=5),
Flash(grid[3][3], color=color_flash, num_lines=10, line_stroke_width=5),
Flash(grid[3][4], color=color_flash, num_lines=10, line_stroke_width=5),
Flash(grid[4][4], color=color_flash, num_lines=10, line_stroke_width=5),
)
self.wait()
def create_grid(self, rows, columns, square_length):
left_corner = Square(side_length=square_length)
grid = []
first_row = [left_corner]
for i in range(columns - 1):
square = Square(side_length=square_length)
square.next_to(first_row[i], RIGHT, buff=0)
first_row.append(square)
grid.append(first_row)
for i in range(rows - 1):
prev_row = grid[i]
# print(prev_row)
new_row = []
for square in prev_row:
# print(square)
square_below = Square(side_length=square_length)
square_below.next_to(square, DOWN, buff=0)
new_row.append(square_below)
grid.append(new_row)
return grid
def get_group_grid(self, grid):
squares = []
for row in grid:
for square in row:
squares.append(square)
return VGroup(*squares)
| [
"[email protected]"
] | |
dcd5f09d699ae8005ad13ed0471cc5fc9cd3ad98 | 7791cae9c097ffbfeefd34dea31586b1963eb675 | /0x0A-python-inheritance/0-lookup.py | 569a633018f8263664a1cd264b61d7d4a4a2f514 | [] | no_license | jicruz96/holbertonschool-higher_level_programming | a33b6c326e832868be791cad87ac703cccbabd64 | 84361c552b7ba1cb173d1e4bd2ea077bb3999b0d | refs/heads/master | 2022-12-18T06:07:34.248796 | 2020-09-28T22:33:59 | 2020-09-28T22:33:59 | 259,228,049 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | #!/usr/bin/python3
""" defines lookup """
def lookup(obj):
""" returns list of available attributes and methods of an object """
return list(dir(obj))
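# Illustrative check (not part of the original task file):
#   'append' in lookup(list)  ->  True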
| [
"[email protected]"
] | |
1744fb64068f0170698c638c5665cd1bccc26c5e | 3665e5e6946fd825bb03b3bcb79be96262ab6d68 | /tests/test_ls.py | f4e454cbecd836e1dfba94644ab03d1de0b6e72b | [
"MIT",
"BSD-3-Clause"
] | permissive | philippeitis/jc | a28b84cff7fb2852a374a7f0f41151b103288f26 | d96b3a65a98bc135d21d4feafc0a43317b5a11fa | refs/heads/master | 2021-02-16T05:03:03.022601 | 2020-03-04T16:30:52 | 2020-03-04T16:30:52 | 244,969,097 | 0 | 0 | MIT | 2020-03-08T21:10:36 | 2020-03-04T18:01:38 | null | UTF-8 | Python | false | false | 18,866 | py | import os
import json
import unittest
import jc.parsers.ls
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class MyTests(unittest.TestCase):
def setUp(self):
# input
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls.out'), 'r') as f:
self.centos_7_7_ls = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls.out'), 'r') as f:
self.ubuntu_18_4_ls = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/ls.out'), 'r') as f:
self.osx_10_11_6_ls = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls.out'), 'r') as f:
self.osx_10_14_6_ls = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-al.out'), 'r') as f:
self.centos_7_7_ls_al = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-al.out'), 'r') as f:
self.ubuntu_18_4_ls_al = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/ls-al.out'), 'r') as f:
self.osx_10_11_6_ls_al = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-al.out'), 'r') as f:
self.osx_10_14_6_ls_al = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-alh.out'), 'r') as f:
self.centos_7_7_ls_alh = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-alh.out'), 'r') as f:
self.ubuntu_18_4_ls_alh = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/ls-alh.out'), 'r') as f:
self.osx_10_11_6_ls_alh = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-alh.out'), 'r') as f:
self.osx_10_14_6_ls_alh = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-R.out'), 'r') as f:
self.centos_7_7_ls_R = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-R.out'), 'r') as f:
self.ubuntu_18_4_ls_R = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-R.out'), 'r') as f:
self.osx_10_14_6_ls_R = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-alR.out'), 'r') as f:
self.centos_7_7_ls_alR = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-alR.out'), 'r') as f:
self.ubuntu_18_4_ls_alR = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-alR.out'), 'r') as f:
self.osx_10_14_6_ls_alR = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-glob.out'), 'r') as f:
self.centos_7_7_ls_glob = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-glob.out'), 'r') as f:
self.ubuntu_18_4_ls_glob = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-glob.out'), 'r') as f:
self.osx_10_14_6_ls_glob = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-R-newlines.out'), 'r') as f:
self.centos_7_7_ls_R_newlines = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-R-newlines.out'), 'r') as f:
self.ubuntu_18_4_ls_R_newlines = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-R-newlines.out'), 'r') as f:
self.osx_10_14_6_ls_R_newlines = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-l-newlines.out'), 'r') as f:
self.centos_7_7_ls_l_newlines = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-l-newlines.out'), 'r') as f:
self.ubuntu_18_4_ls_l_newlines = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-l-newlines.out'), 'r') as f:
self.osx_10_14_6_ls_l_newlines = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-lR-newlines.out'), 'r') as f:
self.centos_7_7_ls_lR_newlines = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-lR-newlines.out'), 'r') as f:
self.ubuntu_18_4_ls_lR_newlines = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-lR-newlines.out'), 'r') as f:
self.osx_10_14_6_ls_lR_newlines = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-newlines.out'), 'r') as f:
self.centos_7_7_ls_newlines = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-newlines.out'), 'r') as f:
self.ubuntu_18_4_ls_newlines = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-newlines.out'), 'r') as f:
self.osx_10_14_6_ls_newlines = f.read()
# output
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls.json'), 'r') as f:
self.centos_7_7_ls_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls.json'), 'r') as f:
self.ubuntu_18_4_ls_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/ls.json'), 'r') as f:
self.osx_10_11_6_ls_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls.json'), 'r') as f:
self.osx_10_14_6_ls_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-al.json'), 'r') as f:
self.centos_7_7_ls_al_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-al.json'), 'r') as f:
self.ubuntu_18_4_ls_al_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/ls-al.json'), 'r') as f:
self.osx_10_11_6_ls_al_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-al.json'), 'r') as f:
self.osx_10_14_6_ls_al_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-alh.json'), 'r') as f:
self.centos_7_7_ls_alh_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-alh.json'), 'r') as f:
self.ubuntu_18_4_ls_alh_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/ls-alh.json'), 'r') as f:
self.osx_10_11_6_ls_alh_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-alh.json'), 'r') as f:
self.osx_10_14_6_ls_alh_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-R.json'), 'r') as f:
self.centos_7_7_ls_R_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-R.json'), 'r') as f:
self.ubuntu_18_4_ls_R_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-R.json'), 'r') as f:
self.osx_10_14_6_ls_R_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-alR.json'), 'r') as f:
self.centos_7_7_ls_alR_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-alR.json'), 'r') as f:
self.ubuntu_18_4_ls_alR_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-alR.json'), 'r') as f:
self.osx_10_14_6_ls_alR_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-glob.json'), 'r') as f:
self.centos_7_7_ls_glob_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-glob.json'), 'r') as f:
self.ubuntu_18_4_ls_glob_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-glob.json'), 'r') as f:
self.osx_10_14_6_ls_glob_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-R-newlines.json'), 'r') as f:
self.centos_7_7_ls_R_newlines_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-R-newlines.json'), 'r') as f:
self.ubuntu_18_4_ls_R_newlines_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-R-newlines.json'), 'r') as f:
self.osx_10_14_6_ls_R_newlines_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-l-newlines.json'), 'r') as f:
self.centos_7_7_ls_l_newlines_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-l-newlines.json'), 'r') as f:
self.ubuntu_18_4_ls_l_newlines_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-l-newlines.json'), 'r') as f:
self.osx_10_14_6_ls_l_newlines_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-lR-newlines.json'), 'r') as f:
self.centos_7_7_ls_lR_newlines_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-lR-newlines.json'), 'r') as f:
self.ubuntu_18_4_ls_lR_newlines_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-lR-newlines.json'), 'r') as f:
self.osx_10_14_6_ls_lR_newlines_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/ls-newlines.json'), 'r') as f:
self.centos_7_7_ls_newlines_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/ls-newlines.json'), 'r') as f:
self.ubuntu_18_4_ls_newlines_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/ls-newlines.json'), 'r') as f:
self.osx_10_14_6_ls_newlines_json = json.loads(f.read())
def test_ls_centos_7_7(self):
"""
Test plain 'ls /' on Centos 7.7
"""
self.assertEqual(jc.parsers.ls.parse(self.centos_7_7_ls, quiet=True), self.centos_7_7_ls_json)
def test_ls_ubuntu_18_4(self):
"""
Test plain 'ls /' on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.ls.parse(self.ubuntu_18_4_ls, quiet=True), self.ubuntu_18_4_ls_json)
def test_ls_osx_10_11_6(self):
"""
Test plain 'ls /' on OSX 10.11.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_11_6_ls, quiet=True), self.osx_10_11_6_ls_json)
def test_ls_osx_10_14_6(self):
"""
Test plain 'ls /' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_14_6_ls, quiet=True), self.osx_10_14_6_ls_json)
def test_ls_al_centos_7_7(self):
"""
Test 'ls -al /' on Centos 7.7
"""
self.assertEqual(jc.parsers.ls.parse(self.centos_7_7_ls_al, quiet=True), self.centos_7_7_ls_al_json)
def test_ls_al_ubuntu_18_4(self):
"""
Test 'ls -al /' on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.ls.parse(self.ubuntu_18_4_ls_al, quiet=True), self.ubuntu_18_4_ls_al_json)
def test_ls_al_osx_10_11_6(self):
"""
Test 'ls -al /' on OSX 10.11.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_11_6_ls_al, quiet=True), self.osx_10_11_6_ls_al_json)
def test_ls_al_osx_10_14_6(self):
"""
Test 'ls -al /' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_14_6_ls_al, quiet=True), self.osx_10_14_6_ls_al_json)
def test_ls_alh_centos_7_7(self):
"""
Test 'ls -alh /' on Centos 7.7
"""
self.assertEqual(jc.parsers.ls.parse(self.centos_7_7_ls_alh, quiet=True), self.centos_7_7_ls_alh_json)
def test_ls_alh_ubuntu_18_4(self):
"""
Test 'ls -alh /' on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.ls.parse(self.ubuntu_18_4_ls_alh, quiet=True), self.ubuntu_18_4_ls_alh_json)
def test_ls_alh_osx_10_11_6(self):
"""
Test 'ls -alh /' on OSX 10.11.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_11_6_ls_alh, quiet=True), self.osx_10_11_6_ls_alh_json)
def test_ls_alh_osx_10_14_6(self):
"""
Test 'ls -alh /' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_14_6_ls_alh, quiet=True), self.osx_10_14_6_ls_alh_json)
def test_ls_R_centos_7_7(self):
"""
Test 'ls -R /usr' on Centos 7.7
"""
self.assertEqual(jc.parsers.ls.parse(self.centos_7_7_ls_R, quiet=True), self.centos_7_7_ls_R_json)
def test_ls_R_ubuntu_18_4(self):
"""
Test 'ls -R /usr' on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.ls.parse(self.ubuntu_18_4_ls_R, quiet=True), self.ubuntu_18_4_ls_R_json)
def test_ls_R_osx_10_14_6(self):
"""
Test 'ls -R /usr' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_14_6_ls_R, quiet=True), self.osx_10_14_6_ls_R_json)
def test_ls_alR_centos_7_7(self):
"""
Test 'ls -alR /usr' on Centos 7.7
"""
self.assertEqual(jc.parsers.ls.parse(self.centos_7_7_ls_alR, quiet=True), self.centos_7_7_ls_alR_json)
def test_ls_alR_ubuntu_18_4(self):
"""
Test 'ls -alR /usr' on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.ls.parse(self.ubuntu_18_4_ls_alR, quiet=True), self.ubuntu_18_4_ls_alR_json)
def test_ls_alR_osx_10_14_6(self):
"""
Test 'ls -alR /usr' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_14_6_ls_alR, quiet=True), self.osx_10_14_6_ls_alR_json)
def test_ls_glob_centos_7_7(self):
"""
Test 'ls /usr/*' on Centos 7.7
"""
self.assertEqual(jc.parsers.ls.parse(self.centos_7_7_ls_glob, quiet=True), self.centos_7_7_ls_glob_json)
def test_ls_glob_ubuntu_18_4(self):
"""
Test 'ls /usr/*' on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.ls.parse(self.ubuntu_18_4_ls_glob, quiet=True), self.ubuntu_18_4_ls_glob_json)
def test_ls_glob_osx_10_14_6(self):
"""
Test 'ls /usr/*' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_14_6_ls_glob, quiet=True), self.osx_10_14_6_ls_glob_json)
def test_ls_R_newlines_centos_7_7(self):
"""
Test 'ls -R' for filenames with newline characters on Centos 7.7
"""
self.assertEqual(jc.parsers.ls.parse(self.centos_7_7_ls_R_newlines, quiet=True), self.centos_7_7_ls_R_newlines_json)
def test_ls_R_newlines_ubuntu_18_4(self):
"""
Test 'ls -R' for filenames with newline characters on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.ls.parse(self.ubuntu_18_4_ls_R_newlines, quiet=True), self.ubuntu_18_4_ls_R_newlines_json)
def test_ls_R_newlines_osx_10_14_6(self):
"""
Test 'ls -R' for filenames with newline characters on OSX 10.14.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_14_6_ls_R_newlines, quiet=True), self.osx_10_14_6_ls_R_newlines_json)
def test_ls_l_newlines_centos_7_7(self):
"""
Test 'ls -l' for filenames with newline characters on Centos 7.7
"""
self.assertEqual(jc.parsers.ls.parse(self.centos_7_7_ls_l_newlines, quiet=True), self.centos_7_7_ls_l_newlines_json)
def test_ls_l_newlines_ubuntu_18_4(self):
"""
Test 'ls -l' for filenames with newline characters on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.ls.parse(self.ubuntu_18_4_ls_l_newlines, quiet=True), self.ubuntu_18_4_ls_l_newlines_json)
def test_ls_l_newlines_osx_10_14_6(self):
"""
Test 'ls -l' for filenames with newline characters on OSX 10.14.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_14_6_ls_l_newlines, quiet=True), self.osx_10_14_6_ls_l_newlines_json)
def test_ls_lR_newlines_centos_7_7(self):
"""
Test 'ls -lR' for filenames with newline characters on Centos 7.7
"""
self.assertEqual(jc.parsers.ls.parse(self.centos_7_7_ls_lR_newlines, quiet=True), self.centos_7_7_ls_lR_newlines_json)
def test_ls_lR_newlines_ubuntu_18_4(self):
"""
Test 'ls -lR' for filenames with newline characters on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.ls.parse(self.ubuntu_18_4_ls_lR_newlines, quiet=True), self.ubuntu_18_4_ls_lR_newlines_json)
def test_ls_lR_newlines_osx_10_14_6(self):
"""
Test 'ls -lR' for filenames with newline characters on OSX 10.14.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_14_6_ls_lR_newlines, quiet=True), self.osx_10_14_6_ls_lR_newlines_json)
def test_ls_newlines_centos_7_7(self):
"""
Test 'ls' for filenames with newline characters on Centos 7.7
"""
self.assertEqual(jc.parsers.ls.parse(self.centos_7_7_ls_newlines, quiet=True), self.centos_7_7_ls_newlines_json)
def test_ls_newlines_ubuntu_18_4(self):
"""
Test 'ls' for filenames with newline characters on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.ls.parse(self.ubuntu_18_4_ls_newlines, quiet=True), self.ubuntu_18_4_ls_newlines_json)
def test_ls_newlines_osx_10_14_6(self):
"""
Test 'ls' for filenames with newline characters on OSX 10.14.6
"""
self.assertEqual(jc.parsers.ls.parse(self.osx_10_14_6_ls_newlines, quiet=True), self.osx_10_14_6_ls_newlines_json)
if __name__ == '__main__':
unittest.main()
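
# Illustrative single-test invocation from the repository root (an assumption,
# not taken from the project docs):
#   python3 -m unittest tests.test_ls.MyTests.test_ls_centos_7_7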
| [
"[email protected]"
] | |
c0776c2b71dbc2bdb19e3384ee68199840921c0c | b3d552675b36cb88a1388fcfc531e497ad7cbee9 | /day6/view_method_decorator_demo/front/views.py | 1ec7acae722b4aed907b645bc8aa11d6e724563a | [] | no_license | gaohj/1902_django | 3cea1f0935fd983f25c6fd832b103ac5165a2e30 | 822af7b42120c6edc699bf97c800887ff84f5621 | refs/heads/master | 2022-12-11T10:02:50.233398 | 2019-11-26T08:33:38 | 2019-11-26T08:33:38 | 209,241,390 | 2 | 0 | null | 2022-12-08T07:28:24 | 2019-09-18T07:05:48 | Python | UTF-8 | Python | false | false | 1,026 | py | from django.shortcuts import render
from .models import Article
from django.views.decorators.http import require_http_methods,require_GET,require_POST,require_safe
from django.http import HttpResponse
# Create your views here.
# @require_http_methods(['GET'])
# @require_GET is equivalent to @require_http_methods(['GET'])
@require_GET
def index(request):
articles = Article.objects.all()
return render(request,'index.html',context={"articles":articles})
@require_http_methods(['GET','POST'])
def add_article(request):
if request.method == 'GET':
return render(request,'add_article.html')
else:
title = request.POST.get('title')
content = request.POST.get('content')
price = request.POST.get('price')
Article.objects.create(title=title,content=content,price=price)
return HttpResponse("success")
@require_safe
def hello(request):
    return HttpResponse("Only relatively safe request methods are allowed to access this view")
# GET and HEAD are both read-only requests: they only fetch data and never create, update or delete | [
"[email protected]"
] | |
9983fb155c2903b301cad1850061e7032e0161b4 | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/comtypes/test/test_QueryService.py | d3c01ad2e505a3952d4b4fe8e118d3bf5894e9af | [
"MIT"
] | permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 797 | py | import unittest
from ctypes import POINTER
import comtypes
from comtypes.client import CreateObject, GetModule
GetModule('oleacc.dll')
from comtypes.gen.Accessibility import IAccessible
@unittest.skip("This IE test is not working. We need to move it to using some other win32 API.")
class TestCase(unittest.TestCase):
def setUp(self):
self.ie = CreateObject('InternetExplorer.application')
def tearDown(self):
self.ie.Quit()
del self.ie
def test(self):
ie = self.ie
ie.navigate2("about:blank", 0)
sp = ie.Document.Body.QueryInterface(comtypes.IServiceProvider)
pacc = sp.QueryService(IAccessible._iid_, IAccessible)
self.assertEqual(type(pacc), POINTER(IAccessible))
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
a703dd3d46d7f867a7b64c23fdeb6357570abaf4 | ac8e5e7d52bd54f1b3690aac154920e003dbba2e | /DA_Faster_ICR_CCR/lib/model/nms/nms_cpu.py | 0b6ab667b4a17d2522f767cc5f43477ad70043b4 | [] | no_license | wanghui-cloud/CCR-ICR | 16b4704cb1eff031825fb3052080fe76597626fd | 155ff46f12627c84bde3b9c55ab6a5c8e1d1cd11 | refs/heads/master | 2023-04-26T07:30:43.384645 | 2021-05-28T12:21:20 | 2021-05-28T12:21:20 | 371,691,104 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,982 | py | from __future__ import absolute_import
import numpy as np
import torch
def nms_cpu(dets, thresh):
    # dets [12000, 5] = proposals_single, scores_single.squeeze(1)
    dets = dets.numpy()
    # unpack x1, y1, x2, y2 and the confidence score of each box
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]
    # area of every detection box
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # sort by confidence score in descending order
    order = scores.argsort()[::-1]  # argsort returns the indices that sort the array in ascending order
    # indices of the boxes we keep
    keep = []
    while order.size > 0:
        # take the index of the highest-scoring remaining box
        i = order.item(0)
        keep.append(i)  # keep this box
        # compute the IoU between the highest-scoring box (order[0]) and all
        # other boxes (order[1:], i.e. the second through the last)
        xx1 = np.maximum(x1[i], x1[order[1:]])  # element-wise maximum of the left/top edges
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])  # the intersection needs the element-wise minimum of the right/bottom edges
        yy2 = np.minimum(y2[i], y2[order[1:]])
        # intersection area; zero when the boxes do not overlap
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h  # area of the intersection box
        # IoU = overlap / (area1 + area2 - overlap)
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # keep only the boxes whose IoU is below the threshold;
        # np.where with just a condition (no x and y) returns the coordinates of
        # the non-zero (condition-satisfying) elements, equivalent to numpy.nonzero
        inds = np.where(ovr <= thresh)[0]
        # update the order sequence, dropping boxes whose IoU exceeds the threshold;
        # ovr was computed against order[1:], so its indices are offset by one
        # relative to order — hence the +1 to map back (index 0 in ovr is order[1])
        order = order[inds + 1]
    return torch.IntTensor(keep)  # return the kept indices
| [
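# Minimal usage sketch (illustrative, not part of the original module): three
# boxes where the second one heavily overlaps the first.
if __name__ == '__main__':
    example = torch.tensor([[0.0, 0.0, 10.0, 10.0, 0.9],     # kept: highest score
                            [1.0, 1.0, 10.0, 10.0, 0.8],     # suppressed: IoU with box 0 is ~0.83
                            [20.0, 20.0, 30.0, 30.0, 0.7]])  # kept: disjoint from box 0
    print(nms_cpu(example, 0.5))  # kept indices: [0, 2]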
"[email protected]"
] | |
541f0193507c2dc7add9c3db34f5c64f48609ab5 | 2b6116b967f6b02a6c62392058623ba8824f5ee2 | /deal/migrations/0037_auto_20190809_0846.py | fe6f69d2904c855aac6077b60b714befb45a1654 | [] | no_license | tayursky/med-crm | 68a16d771a91a9a5ff3e61acd00c08ad6297c405 | 8e39904968a8217b9cd4593acc3afa27ff4584ba | refs/heads/master | 2023-01-11T08:28:23.762631 | 2020-03-15T20:53:59 | 2020-03-15T20:53:59 | 247,546,343 | 0 | 0 | null | 2023-01-06T02:27:23 | 2020-03-15T20:30:05 | Python | UTF-8 | Python | false | false | 1,283 | py | # Generated by Django 2.2.1 on 2019-08-09 08:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0005_auto_20190802_0616'),
('deal', '0036_auto_20190801_1443'),
]
operations = [
migrations.AddField(
model_name='historicalservice',
name='default_master',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='company.User', verbose_name='Правщик по умолчанию'),
),
migrations.AddField(
model_name='service',
name='default_master',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='service_default_master', to='company.User', verbose_name='Правщик по умолчанию'),
),
migrations.AlterField(
model_name='service',
name='masters',
field=models.ManyToManyField(blank=True, limit_choices_to={'account__group__in': ['Правщики']}, related_name='service_masters', to='company.User', verbose_name='Правщики'),
),
]
| [
"[email protected]"
] | |
74f9f70c5cab3a26af146ed5bb0ee63971b5fea2 | 5ed2d0e107e4cdcd8129f418fdc40f1f50267514 | /bnb/PreferenceList/test.py | f69a3fdf4ef1e513313bbc0f839736e895ebbe5f | [] | no_license | tliu57/Leetcode | 6cdc3caa460a75c804870f6615653f335fc97de1 | c480697d174d33219b513a0b670bc82b17c91ce1 | refs/heads/master | 2020-05-21T03:14:07.399407 | 2018-07-08T18:50:01 | 2018-07-08T18:50:01 | 31,505,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | class Solution(object):
def preferenceList(self, preferences):
map = {}
degree = {}
result = []
if not preferences:
return result
for pref in preferences:
for elem in pref:
degree[elem] = 0
for pref in preferences:
for i in range(1, len(pref)):
for j in range(i):
if pref[j] not in map:
post_elem_set = set()
post_elem_set.add(pref[i])
map[pref[j]] = post_elem_set
else:
map[pref[j]].add(pref[i])
for key in map:
for elem in map[key]:
degree[elem] += 1
q = []
for c in degree:
if degree[c] == 0:
q.append(c)
while q:
char = q.pop(0)
result.append(char)
if char in map:
for c in map[char]:
degree[c] -= 1
if degree[c] == 0:
q.append(c)
return result
sol = Solution()
preferences = [
[3, 5, 7, 9],
[2, 3, 8],
[5, 8]
]
print(sol.preferenceList(preferences))
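# Expected: a valid topological order of the preference constraints, e.g.
# [2, 3, 5, 7, 8, 9]; ties (such as 7 vs. 8, which become available together)
# may print in either order because set iteration order is unspecified.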
| [
"[email protected]"
] | |
8071eb95a1b07f7e380d1653e9db5a38f2283703 | 6f67606189b27ab3dfd20d9fa4b5dab00beb4302 | /MetamorphicTests/all_mutants/sales_forecasting_file/81.py | 2d13423f31c1f829bcadf99cabe287b212b90293 | [
"Apache-2.0"
] | permissive | fjgao/Sales-forecasting-with-RNNs | b8d468946d5df8d694178ef0664717c62bf156b8 | 22b4639ecbb48381af53326ace94a3538201b586 | refs/heads/master | 2022-02-09T11:29:04.815900 | 2019-02-08T08:51:23 | 2019-02-08T08:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,639 | py | def gen_mutants():
import tensorflow as tf
import pandas
import numpy as np
DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'
DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'
TRAINED_MODEL_PATH = ''
TIME_STEPS = 10
NUMBER_OF_DAYS_TO_FORECAST = 1
BATCH_SIZE = 100
NUM_EPOCHS = 100
LSTM_UNITS = 250
TENSORBOARD_LOGDIR = 'tensorboard_log'
data_train = pandas.read_csv(DATAFILE_TRAIN)
data_validate = pandas.read_csv(DATAFILE_VALIDATE)
data_train.head()
numTrainingData = len(data_train)
numValidationData = len(data_validate)
trainingData_date = data_train['date'][0:numTrainingData]
trainingData_sales = data_train['sales'][0:numTrainingData]
trainindData_price = data_train['price'][0:numTrainingData]
validationData_date = data_validate['date'][0:numValidationData]
validationData_sales = data_validate['sales'][0:numValidationData]
validationData_price = data_validate['price'][0:numValidationData]
trainingData_sales.head()
print(len(trainingData_sales))
print(len(validationData_sales))
trainingData_sales_min = min(trainingData_sales)
trainingData_sales_max = max(trainingData_sales)
trainingData_sales_range = trainingData_sales_max - trainingData_sales_min
trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]
validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]
print('Min:', trainingData_sales_min)
print('Range:', trainingData_sales_max - trainingData_sales_min)
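    # Sliding-window construction (as built below): each training sample is a
    # TIME_STEPS-long window of normalised sales, and its target is the
    # NUMBER_OF_DAYS_TO_FORECAST values that immediately follow the window.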
trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]
targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start = start + 1
[trainingDataSequence_sales[i,:,0] for i in range(3)]
[targetDataSequence_sales[i] for i in range(3)]
a = np.arange(len(targetDataSequence_sales))
np.random.shuffle(a)
trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
loc = 0
for i in a:
trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]
loc += 1
trainingDataSequence_sales = trainingDataSequence_sales_shuffle
targetDataSequence_sales = targetDataSequence_sales_shuffle
validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]
validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start += 1
tf.reset_default_graph()
inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')
targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')
cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')
(output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)
lastCellOutput = output[:,-1,:]
print('output:', output)
print('state:', state)
print('lastCellOutput:', lastCellOutput)
weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))
bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))
forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')
forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')
print(forecast)
print(forecast_originalScale)
loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')
tf.summary.scalar(tensor=loss, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
minimize_step = optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)
all_summary_ops = tf.summary.merge_all()
numSteps = 0
for e in range(NUM_EPOCHS):
print('starting training for epoch:', e + 1)
startLocation = 0
iteration = 0
for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]
(_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
tensorboard_writer.add_summary(summary_values, numSteps)
numSteps += 1
if (iteration + 1) % 1 == 0:
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
startLocation += BATCH_SIZE
if len(targetDataSequence_sales) > startLocation:
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]
(_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
totalValidationLoss = 0
startLocation = 0
print('starting validation')
for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):
validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]
(validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
startLocation += BATCH_SIZE
totalValidationLoss += validationLsBatch
print('first five predictions:', validationForecastBatch[0:5])
print('first five actuals :', validationBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
if startLocation < len(validationDataSequence_sales):
validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]
validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]
(validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
totalValidationLoss += validationLsBatch
print('Validation completed after epoch:', e + 1, '. Total validation loss:', totalValidationLoss)
print('----------- Saving Model')
tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\
{'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\
{'loss': loss, 'forecast_originalScale': forecast_originalScale})
print('saved model to:', TRAINED_MODEL_PATH)
print('----------- Finis') | [
"[email protected]"
] | |
9873cc835d18dd5753bb7da09d533cd4103af5ed | 0682b9249e65c3bf0ee70f3c4fe85196562f1a95 | /tests/core/parameter/test_parameter_node.py | 3e1da62026cec97c59dd05aa327a0e469348d528 | [
"Apache-2.0"
] | permissive | perillaroc/takler | 7039cc3ba1e53be851993820fe8d684f84615fd2 | 654c2224e529c2f7c5fd600ee9272dcc24fd0287 | refs/heads/master | 2023-09-02T21:25:34.347695 | 2023-08-25T02:09:10 | 2023-08-25T02:09:10 | 23,487,487 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,896 | py | import pytest
from takler.core import Parameter
@pytest.fixture
def simple_flow_with_parameter(simple_flow):
flow1 = simple_flow.flow1
flow1.add_parameter("ECF_HOME", "/home/johndoe")
flow1.add_parameter("NODES", 4)
flow1.add_parameter("TIME_INTERVAL", 0.1)
container1 = simple_flow.container1
container1.add_parameter("TASKS", 32)
task1 = simple_flow.task1
task1.add_parameter("FLAG", True)
return simple_flow
def test_add_parameter(simple_flow):
flow1 = simple_flow.flow1
flow1.add_parameter("ECF_HOME", "/home/johndoe")
flow1.add_parameter("NODES", 4)
flow1.add_parameter("TIME_INTERVAL", 0.1)
assert flow1.user_parameters["ECF_HOME"] == Parameter(name="ECF_HOME", value="/home/johndoe")
assert flow1.user_parameters["NODES"] == Parameter(name="NODES", value=4)
assert flow1.user_parameters["TIME_INTERVAL"] == Parameter(name="TIME_INTERVAL", value=0.1)
container1 = simple_flow.container1
container1.add_parameter("TASKS", 32)
assert container1.user_parameters["TASKS"] == Parameter(name="TASKS", value=32)
task1 = simple_flow.task1
task1.add_parameter("FLAG", True)
assert task1.user_parameters["FLAG"] == Parameter(name="FLAG", value=True)
def test_find_parameter(simple_flow_with_parameter):
flow1 = simple_flow_with_parameter.flow1
assert flow1.find_parameter("ECF_HOME") == Parameter("ECF_HOME", "/home/johndoe")
assert flow1.find_parameter("NO_EXIST") is None
container1 = simple_flow_with_parameter.container1
assert container1.find_parameter("TASKS") == Parameter("TASKS", 32)
assert container1.find_parameter("NO_EXIST") is None
assert container1.find_parameter("ECF_HOME") is None
task1 = simple_flow_with_parameter.task1
assert task1.find_parameter("FLAG") == Parameter("FLAG", True)
assert task1.find_parameter("NO_EXIST") is None
assert task1.find_parameter("TASKS") is None
assert task1.find_parameter("ECF_HOME") is None
def test_find_parent_parameter(simple_flow_with_parameter):
flow1 = simple_flow_with_parameter.flow1
assert flow1.find_parent_parameter("ECF_HOME") == Parameter("ECF_HOME", "/home/johndoe")
assert flow1.find_parent_parameter("NO_EXIST") is None
container1 = simple_flow_with_parameter.container1
assert container1.find_parent_parameter("TASKS") == Parameter("TASKS", 32)
assert container1.find_parent_parameter("NO_EXIST") is None
assert container1.find_parent_parameter("ECF_HOME") == Parameter("ECF_HOME", "/home/johndoe")
task1 = simple_flow_with_parameter.task1
assert task1.find_parent_parameter("FLAG") == Parameter("FLAG", True)
assert task1.find_parent_parameter("NO_EXIST") is None
assert task1.find_parent_parameter("TASKS") == Parameter("TASKS", 32)
assert task1.find_parent_parameter("ECF_HOME") == Parameter("ECF_HOME", "/home/johndoe")
| [
"[email protected]"
] | |
28dc8edf1052517b8b4c2dd925c7538b887ebf09 | 3958e68814826e7104c4f013ea2aac4e5d77223a | /test/test_accuracy_large_files.py | c0aa5296d3c686623a79dabbbb057db7662534fd | [
"Apache-2.0"
] | permissive | phutares/ocreval | 2a1c935d36922755e1d89769c45371d56d344c70 | 873a0de5796c0b9ccf07a549afdd30159a9e0b3e | refs/heads/master | 2023-04-19T06:41:15.393399 | 2021-04-16T20:57:09 | 2021-04-16T20:57:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,992 | py | #!/usr/bin/env python
# -*- encoding: UTF-8 -*-
"""
Tests accuracy on LARGE UTF-8 files.
"""
import random
import subprocess
import tempfile
import os.path as p
# Alias range as xrange in Python 3:
try:
xrange
except NameError:
xrange = range
# Create a Python 2/3 Unicode string literal:
try:
unicode
except NameError:
u = str
else:
u = lambda s: s.decode('UTF-8')
# Path to accuracy program
ACCURACY = p.join(p.dirname(p.dirname(p.realpath(__file__))),
'bin', 'accuracy')
assert p.exists(ACCURACY), 'Could not find ' + ACCURACY
# http://www.languagegeek.com/isolate/haidastory.html
corpus = u('''\
Aaniisuu tangaa g̱aging.ang ’wan suuga. ’Ll xidgwangaas, x̱uyaa’aa. Tllgu
ḵ’aawgyaa hllng.aaygi ’laa ḵyaang.aas. Ḵawdiuu gwaay g̱ud gwaa nang ḵadlaa
ḵ’ayg̱udyaas ’laagu ḵ’aawgaay g̱an ’laa g̱á ’laa xidaas. Á tl’l sg̱aana ḵidaads
’yaahlgaagaas g̱iinuus gangaang ’laagu gud gwii x̱iihlt’ahliyaagaas. Ga
sg̱aanag̱waa g̱ax̱aas ’laa t’isda ḵ’a sḵ’agilaang.aas, tll gwii x̱an, hahl gwii’ad
wah gwii’aa. G̱adagaas gyaanuu’asing g̱aalgaagaang ’wan suuga.
Nang kilsdlaas naag̱ag̱a.aw tadll chi’a’aawaagan. Sing ḵ’alg̱ada ’ll ḵaaxuhls
gyaan ’ll kindagaang.aas. Sda ’laa xid k’udahldats’aasii gyaan gagu ’laa
ḵ’aw’aawaasgu x̱an ’laa ḵ’aawgangas.
''')
dictionary = tuple(word for word in corpus.split())
alphabet = [char for char in corpus if char not in ' \n']
def one_in(n):
return random.choice(xrange(n)) == 1
def change_letter(word):
letter_index = random.choice(xrange(len(word)))
mutation = random.choice(alphabet)
return word[:letter_index] + mutation + word[letter_index + 1:]
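
# Illustrative effect (hypothetical output): change_letter(u('sing')) might
# return u('sxng') — one random position replaced with a random corpus character.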
if __name__ == '__main__':
import sys
amount_of_words = int(sys.argv[1]) if len(sys.argv) > 1 else 32768
# Create temporary files for each...
with tempfile.NamedTemporaryFile('wb') as correct_file,\
tempfile.NamedTemporaryFile('wb') as generated_file:
# Generate A LOT of random words
for _ in xrange(amount_of_words):
end = b'\n' if one_in(10) else b' '
word = random.choice(dictionary)
correct_file.write(word.encode('UTF-8'))
# Occasionally, typo a word in the generated file.
generated_word = change_letter(word) if one_in(1000) else word
generated_file.write(generated_word.encode('UTF-8'))
# Write a space or newline.
correct_file.write(end)
generated_file.write(end)
# Finish off the file with a new line and flush the output.
if end != b'\n':
correct_file.write(b'\n')
generated_file.write(b'\n')
correct_file.flush()
generated_file.flush()
# This will fail if accuracy itself fails.
subprocess.check_call([ACCURACY,
correct_file.name, generated_file.name])
| [
"[email protected]"
] | |
63d0f327304779eea8cc5e57fe84a233c6c7a91a | ce4f7f8e9336b8bbf9cbfe147d922e37034ab6c3 | /abc193/b/main.py | 0e3c7e0f78cd78c0c45811485390821a3ab19d13 | [] | no_license | kussy-tessy/atcoder | 5604919747242ee9740b9131bb6e168e96af0151 | ee917fa5a5218d4a9e72f710d0d844e7c203f13b | refs/heads/master | 2023-07-21T09:25:15.464881 | 2021-09-04T14:06:02 | 2021-09-04T14:06:02 | 311,221,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | #!/usr/bin/env python3
N = int(input())
APXs = []
for _ in range(N):
APXs.append(tuple(map(int,(input().split()))))
ans = float('inf')
for A, P, X in APXs:
if A < X:
ans = min(P, ans)
print(-1 if ans == float('inf') else ans) | [
"[email protected]"
] | |
e52d4984fc5ca34ed8bb3b24514f0d20e84ac60e | ac235a23f22be0d6f1818bb53902177f9969813a | /tests/lib-injection/dd-lib-python-init-test-django-uvicorn/django_app.py | dc0e14bad8c09edf3e6da8eadcefc9bd8dde30c3 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | DataDog/dd-trace-py | f09d6d48c4c69aea68f999fc8a458ade5c6150cf | 1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17 | refs/heads/1.x | 2023-09-01T20:25:26.746324 | 2023-09-01T18:54:37 | 2023-09-01T18:54:37 | 61,572,326 | 461 | 426 | NOASSERTION | 2023-09-14T20:38:57 | 2016-06-20T18:52:23 | Python | UTF-8 | Python | false | false | 414 | py | import os
from django.core.asgi import get_asgi_application
from django.http import HttpResponse
from django.urls import path
filepath, extension = os.path.splitext(__file__)
ROOT_URLCONF = os.path.basename(filepath)
DEBUG = False
SECRET_KEY = "fdsfdasfa"
ALLOWED_HOSTS = ["*"]
def index(request):
return HttpResponse("test")
urlpatterns = [
path("", index),
]
application = get_asgi_application()
| [
"[email protected]"
] | |
5dd37b5562aa1c5c76f36b7ae29235262b6fa21e | acf9787f29749a7cb3e45eb8f99f6022281ea0ec | /src/clustering.py | 871f3a07fa870cb155ea9cbfafea64b6871bdaaf | [] | no_license | abunuwas/ecs_admin | 35061dd572b7515932a7b6d594abcbc3cac872e9 | f5d0cbf62c08f0b4089ba25bd3335f4304ed9de7 | refs/heads/master | 2020-04-24T02:07:03.900116 | 2016-09-12T17:16:09 | 2016-09-12T22:31:03 | 67,236,727 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,708 | py | import boto3
from ecs import ECS
from ec2 import EC2Instance
from iam import IAM
from sns import SNS
from lambda_func import Lambda
from core_utils import filter_args
from exceptions import MissingValueError #EntityExistsError, LimitExceededError, DoesNotExistError, InvalidOperationError
from policies import ec2_trust_policy, ecs_role_policy
class Cluster(ECS):
"""
"""
def __init__(self,
app_name,
image,
user_data_file,
task_definition=None,
service=None,
desired_count=1,
max_health=None,
min_health=None,
container_definitions=None,
key_name=None,
security_groups=None,
user_data=None,
lambda_role=None,
aws_parameters=None
):
'''
:type user_data_file: string
        :param user_data_file: path to a .txt file containing bash
            commands to be executed on an EC2 instance at launch
            time. At a minimum, the commands must write the following
            settings to /etc/ecs/ecs.config on the instance:
                 `ECS_CLUSTER`
            and a registry-auth block of the form
                {"auths":
                    {
                    `dkr.ecr.region.amazonaws.com`: { "auth": `auth_token` }
                    "https://index.docker.io/v1/": { "auth": `auth_token` }
                    }
                }
            See the AWS documentation for further information:
            ##############################URL.
:type max_health: int
:param max_health: maximumPercent for the deployment
configuration of an ECS service. Default value in
the AWS API is 200.
:type min_health: int
:param min_health: minimumHealthyPercent for the deployment
configuration of an ECS service. Default value in the
AWS API is 50.
:type key_name: string
:param key_name: name of the ssh key pair to be used when
            creating EC2 instances within the cluster.
'''
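        # A minimal sketch (an assumption, not this project's canonical
        # file) of what `user_data_file` could contain -- bash that writes
        # the settings named in the docstring into /etc/ecs/ecs.config:
        #
        #     #!/bin/bash
        #     echo "ECS_CLUSTER=my_app_cluster" >> /etc/ecs/ecs.config
        #     echo 'ECS_ENGINE_AUTH_TYPE=dockercfg' >> /etc/ecs/ecs.config
        #     echo 'ECS_ENGINE_AUTH_DATA={"https://index.docker.io/v1/":{"auth":"<auth_token>"}}' >> /etc/ecs/ecs.config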
self.app_name = app_name
# The following parameters are automatically assigned
# based on the name of the application following a
# default naming structure. This is to ensure that we
# follow a systematic and ordered approachto naming our
# resources that makes it easier to identify and locate
# them.
# Users are free to modify these attributes by repointing
# them to different strings. This might be warranted in
# cases in which some flexibility is required. For this
# reason, the attributes are not readonly.
# Users who modify these attributes are responsible for
# keeping track of the resources they create associated
# with the cluster.
self.cluster_name = app_name+'_cluster'
self.service_name = app_name+'_service'
self.task_name = app_name+'_task'
if container_definitions is None:
self.container_definitions = []
else:
self.container_definitions = container_definitions
self.task_definition = task_definition
self.service = service
self.image = image
self.desired_count = desired_count
self.min_health = min_health
self.max_health = max_health
self.key_name = key_name
self.user_data_file = user_data_file
self.user_data = user_data
self.security_groups = security_groups
self._instances = []
self.lambda_role = lambda_role
#self.default_lambda_role = 'lambda_ecs_role'
self.aws_parameters = aws_parameters
self._make_clients()
## Allow passing aws connection parameters to get
## the connections.
self.ec2 = EC2Instance(None, None, None)
self.iam = IAM()
self.sns = SNS()
self.awslambda = Lambda()
self._cluster = None
@property
def cluster(self):
return self._cluster
@property
def instances(self):
return self._instances
def _make_clients(self):
if self.aws_parameters is not None:
self.ecs_client = boto3.client('ecs', **self.aws_parameters)
else:
try:
self.ecs_client = boto3.client('ecs')
except Exception as e:
print(str(e))
return None
def get_ready(self):
if self.container_definitions == []:
self.define_container()
self.user_data = self.ec2.get_user_data(self.user_data_file)
self.ec2.get_ready()
def create(self):
#lambda_role = lambda_ecs
#task_role = self.create_role(path=self.app_name,
# role_name=self.task_name,
# policy_trust=task_role_policy)
        if not self.container_definitions:
msg = '''Please define a container to run within the cluster.
You can run the Cluster.get_ready() method to obtain
a default definition.
'''
raise MissingValueError(msg)
cluster = self.create_cluster()
self.create_task_definition()
self.create_service()
        self.profile = self.default_ec2_instance_profile()
        return self.profile
def create_cluster(self):
cluster = ECS.create_cluster(self, name=self.cluster_name)
self._cluster = cluster['clusterArn']
return self.cluster
def define_container(self, image=None, name=None, **kwargs):
if image is None:
image = self.image
if name is None:
name = self.app_name
container = ECS.define_container(self, image=image, name=name, **kwargs)
self.container_definitions.append(container)
return None
def create_task_definition(self, *args, **kwargs):
task_definition = ECS.create_task_definition(self, family=self.app_name, containers=self.container_definitions, **kwargs)
self.task_definition = task_definition['taskDefinitionArn']
return self.task_definition
def list_task_definitions(self):
return ECS.list_task_definitions(self, self.app_name)
def list_tasks(self, *args, **kwargs):
return ECS.list_tasks(self, self.cluster_name)
def describe_tasks(self):
return ECS.describe_tasks(self, self.cluster_name)
def stop_tasks(self, *args, **kwargs):
return ECS.stop_tasks(self, cluster=self.cluster_name)
def create_service(self, **kwargs):
service = ECS.create_service(self,
cluster=self.cluster_name,
service_name=self.service_name,
task_definition=self.task_definition,
desired_count=self.desired_count,
max_health=self.max_health,
min_health=self.min_health,
**kwargs
)
self.service = service
return self.service
def list_services(self, *args, **kwargs):
return ECS.list_services(self, cluster=self.cluster_name)
def describe_services(self):
return ECS.describe_services(self, self.cluster_name)
def set_count_services_zero(self, *args, **kwargs):
return ECS.set_count_services_zero(self, cluster=self.cluster_name, services=self.list_services())
def delete_service(self, service, *args, **kwargs):
return ECS.delete_service(self, cluster=self.cluster_name, service=service)
def delete_services(self, services, *args, **kwargs):
return ECS.delete_services(self, cluster=self.cluster_name, services=services)
def delete(self):
return ECS.delete_cluster(self, cluster=self.cluster_name)
def clearup(self):
return ECS.clearup_cluster(self, self.cluster_name)
    def create_role(self, **kwargs):
        return self.iam.create_role(**kwargs)
def list_roles(self, **kwargs):
return self.iam.list_roles(**kwargs)
def role_has_permission(self, role_name, permissions):
return self.iam.role_has_permission(role_name, permissions)
def roles_have_permissions(self, roles, permissions):
return self.iam.roles_have_permissions(roles, permissions)
def list_policies(self, **kwargs):
return self.iam.list_policies(**kwargs)
    def default_ec2_instance_profile(self):
        # Route these calls through the helpers held on self; the
        # create_policy/create_instance_profile call signatures on the IAM
        # wrapper are assumptions based on the bare names used previously.
        ecs_instance_role = self.create_role(role_name='ec2InstanceRole', policy_trust=ec2_trust_policy)
        ecs_policy = self.iam.create_policy(policy_name='ecs_role_policy', policy_document=ecs_role_policy)
        self.iam.attach_policy(role_name='ec2InstanceRole', policy_arn=ecs_policy)
        profile = self.iam.create_instance_profile(name='ec2InstanceProfileECS')
        response = self.ec2.add_role2profile(role_name='ec2InstanceRole',
                                             profile_name='ec2InstanceProfileECS')
        return profile
def get_default_security_group(self):
pass
def default_ecs_lambda_role(self):
#lambda_role = iam_client.create_role(role_name='lambda_ecs_role', policy_trust=task_role_policy)
#lambda_ecs_policy = iam_client.create_policy(policy_name='lambda_ecs',
# policy_document=lambda_ecs_policy,
# description='Standard policy allowing Lambda functions to describe and update ECS services.'
# )
#if not role_has_permissions(lambda_role, permissions=['ecs:DescribeServices', 'ecs:UpdateService', 'logs:*']):
# iam_client.attach_policy('lambda_ecs_role', policy_arn=lambda_ecs_policy)
#return None
pass
def create_lambda(self, **kwargs):
# return lambda_func.create_lambda(**kwargs)
pass
def add_permission(self, **kwargs):
# return lambda_client.add_permissions(**kwargs)
pass
def create_notification(self, **kwargs):
# return sns_client.create_notification(**kwargs)
pass
def create_default_scaleup_lambda(self, metric_):
default_notification_scaleupdown = None
pass
def create_default_scaledown_lambda(self):
pass
def create_default_lambdas(self):
create_default_scaleup_lambda()
create_default_scaledown_lambda()
def create_alarm(self, **kwargs):
# return cloudwatch_client.create_alarm(**kwargs)
pass
def set_alarm_state(self, **kwargs):
# return cloudwatch_client.set_alarm_state(**kwargs)
pass
def list_resources(self):
cluster_name = None
task_definitions = None
running_tasks = None
service = {
'service_name': None,
'desired_count': 0,
'running_tasks': 0,
'pending_tasks': 0,
'deployment_config': {
'min_health': 0,
'max_health': 0
}
}
lambdas = []
metrics = None
alarms = None
sns_topics = None
ec2 = []
def launch_ec2(self, key_name, security_groups, profile_arn):
# ec2_client.launch_instance()
        # Delegate to the EC2 helper; the launch_instance signature is an
        # assumption based on the commented-out call above.
        instance = self.ec2.launch_instance(key_name=key_name,
                                            security_groups=security_groups,
                                            profile_arn=profile_arn)
return instance
cluster_name = 'xmpp_component'
cluster = Cluster(app_name=cluster_name, image='abunuwas/xmpp-component:v.0.0.1', user_data_file='docker-login.txt')
cluster.get_ready()
#instance = launch_ec2(key_name=key_name, security_groups=security_groups, user_data=, profile_arn=user_data)
#provision_default_ecs_lambda_role()
#lambda_role = create_role(role_name=lambda_role,
# policy_trust=lambda_role_trust_policy)
#from policies import lambda_ecs_policy
#lambda_ecs_policy = create_policy(policy_name='lambda_ecs_test',
# policy_document=lambda_ecs_policy,
# description='Standard policy allowing Lambda functions to describe and update ECS services.'
# )
#permission = add_permission(function_name='lambda_ecs',
#if not role_has_permissions(role_name=lambda_ecs, permissions=['ecs:DescribeServices', 'ecs:UpdateService', 'logs:*']):
#request = attach_policy()
#provision_ecs_lambda_role(lambda_role_name=lambda_ecs)
#function = create_lambda(name=lambda_name+'_scaleup',
# role_arn=lambda_role,
# handler=handler,
# code_file=code_file,
# description=description
# )
#test = setup_cluster(lambda_ecs='lambda_ecs_role')
#print(test)
#roles = cluster.list_roles()
#for role in roles:
# print(role)
#roles_names = [role['RoleName'] for role in roles]
#print('aws-elasticbeanstalk-ec2-role' in roles_names)
#services = cluster.list_services()
#print(services) # -> ['arn:aws:ecs:eu-west-1:876701361933:service/xmpp_component_service']
#tasks = cluster.list_tasks(cluster_name)
#print(tasks) #-> []
#definitions = cluster.list_task_definitions()
#print(definitions)
#print('arn:aws:ecs:eu-west-1:876701361933:task-definition/xmpp_component:1' in definitions)
#descriptions = [cluster.describe_task_definition(definition) for definition in definitions]
#print(descriptions[0])
# -> {'volumes': [], 'requiresAttributes': [{'name': 'com.amazonaws.ecs.capability.ecr-auth'}], 'revision': 1, 'status': 'ACTIVE', 'containerDefinitions': [{'portMappings': [{'protocol': 'tcp', 'containerPort': 8080, 'hostPort': 9090}], 'essential': True, 'command': [], 'environment': [], 'cpu': 10, 'links': [], 'name': 'sample-app', 'memory': 300, 'mountPoints': [], 'image': '876701361933.dkr.ecr.eu-west-1.amazonaws.com/abunuwas/python_app:latest', 'volumesFrom': [], 'entryPoint': []}], 'taskDefinitionArn': 'arn:aws:ecs:eu-west-1:876701361933:task-definition/console-sample-app-static:1', 'family': 'console-sample-app-static'}
services_description = cluster.describe_services()
print(services_description)
# -> [{'deployments': [{'createdAt': datetime.datetime(2016, 9, 6, 15, 45, 16, 690000, tzinfo=tzlocal()), 'runningCount': 0, 'desiredCount': 1, 'updatedAt': datetime.datetime(2016, 9, 6, 15, 45, 16, 690000, tzinfo=tzlocal()), 'taskDefinition': 'arn:aws:ecs:eu-west-1:876701361933:task-definition/xmpp_component:7', 'status': 'PRIMARY', 'id': 'ecs-svc/9223370563678059106', 'pendingCount': 0}], 'runningCount': 0, 'deploymentConfiguration': {'minimumHealthyPercent': 50, 'maximumPercent': 150}, 'taskDefinition': 'arn:aws:ecs:eu-west-1:876701361933:task-definition/xmpp_component:7', 'createdAt': datetime.datetime(2016, 9, 6, 15, 45, 16, 690000, tzinfo=tzlocal()), 'events': [{'createdAt': datetime.datetime(2016, 9, 9, 3, 53, 34, 717000, tzinfo=tzlocal()), 'id': 'b140e3fb-b19a-4cff-baf4-a6a7a4589b58', 'message': '(service xmpp_component_service) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. For more information, see the Troubleshooting section of the Amazon ECS Developer Guide.'}, {'createdAt': datetime.datetime(2016, 9, 8, 21, 52, 5, 574000, tzinfo=tzlocal()), 'id': '603c509d-ddf8-4ca5-a1c7-f6d5a8f6dc47', 'message': '(service xmpp_component_service) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. For more information, see the Troubleshooting section of the Amazon ECS Developer Guide.'}, {'createdAt': datetime.datetime(2016, 9, 8, 15, 51, 24, 9000, tzinfo=tzlocal()), 'id': 'e9058d43-f508-4140-86b5-04cf5fe7bd44', 'message': '(service xmpp_component_service) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. For more information, see the Troubleshooting section of the Amazon ECS Developer Guide.'}, {'createdAt': datetime.datetime(2016, 9, 8, 9, 51, 21, 866000, tzinfo=tzlocal()), 'id': '8eb52637-b583-4f53-aea1-3e17413eb2db', 'message': '(service xmpp_component_service) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. For more information, see the Troubleshooting section of the Amazon ECS Developer Guide.'}, {'createdAt': datetime.datetime(2016, 9, 8, 3, 49, 46, 177000, tzinfo=tzlocal()), 'id': '6bb6c20a-0fcf-4e0b-87b6-15e77322013d', 'message': '(service xmpp_component_service) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. For more information, see the Troubleshooting section of the Amazon ECS Developer Guide.'}, {'createdAt': datetime.datetime(2016, 9, 7, 21, 49, 16, 550000, tzinfo=tzlocal()), 'id': '8dba18ab-4dd9-40fe-93f0-72b453e35b4c', 'message': '(service xmpp_component_service) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. For more information, see the Troubleshooting section of the Amazon ECS Developer Guide.'}, {'createdAt': datetime.datetime(2016, 9, 7, 15, 48, 42, 277000, tzinfo=tzlocal()), 'id': '137c846f-be76-4750-b376-c83eadf2387e', 'message': '(service xmpp_component_service) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. 
# For more information, see the Troubleshooting section of the Amazon ECS Developer Guide.'}, {'createdAt': datetime.datetime(2016, 9, 7, 9, 47, 43, 926000, tzinfo=tzlocal()), 'id': '0451faea-60be-4142-ac4c-7199e8c33dca', 'message': '(service xmpp_component_service) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. For more information, see the Troubleshooting section of the Amazon ECS Developer Guide.'}, {'createdAt': datetime.datetime(2016, 9, 7, 3, 46, 25, 984000, tzinfo=tzlocal()), 'id': 'f2cd5a9e-e5e5-4777-a710-1a17481175f0', 'message': '(service xmpp_component_service) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. For more information, see the Troubleshooting section of the Amazon ECS Developer Guide.'}, {'createdAt': datetime.datetime(2016, 9, 6, 21, 45, 55, 486000, tzinfo=tzlocal()), 'id': 'aac7c71f-cfb5-4e38-9b6a-a462061ee142', 'message': '(service xmpp_component_service) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. For more information, see the Troubleshooting section of the Amazon ECS Developer Guide.'}, {'createdAt': datetime.datetime(2016, 9, 6, 15, 45, 24, 646000, tzinfo=tzlocal()), 'id': 'b2238f4a-ee05-4cb4-801e-ca2a8e2b1d16', 'message': '(service xmpp_component_service) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. For more information, see the Troubleshooting section of the Amazon ECS Developer Guide.'}], 'pendingCount': 0, 'loadBalancers': [], 'desiredCount': 1, 'serviceName': 'xmpp_component_service', 'clusterArn': 'arn:aws:ecs:eu-west-1:876701361933:cluster/xmpp_component_cluster', 'status': 'ACTIVE', 'serviceArn': 'arn:aws:ecs:eu-west-1:876701361933:service/xmpp_component_service'}]
#service_task_def = services_description[0]['taskDefinition']
#print(service_task_def)
# -> arn:aws:ecs:eu-west-1:876701361933:task-definition/xmpp_component:7
#service_task_def_description = cluster.describe_task_definition(service_task_def)
#print(service_task_def_description)
# -> {'status': 'ACTIVE', 'revision': 7, 'requiresAttributes': [{'name': 'com.amazonaws.ecs.capability.docker-remote-api.1.17'}], 'family': 'xmpp_component', 'volumes': [], 'taskDefinitionArn': 'arn:aws:ecs:eu-west-1:876701361933:task-definition/xmpp_component:7', 'containerDefinitions': [{'readonlyRootFilesystem': True, 'volumesFrom': [], 'mountPoints': [], 'image': 'abunuwas/xmpp-component:v.0.0.1', 'environment': [], 'memory': 100, 'name': 'xmpp_component', 'essential': True, 'cpu': 100, 'portMappings': []}]}
#cluster.set_count_services_zero()
#tasks_descriptions = cluster.describe_tasks()
#print(tasks_descriptions)
#response = cluster.stop_tasks()
#print(response)
#response = cluster.clearup()
#print(response)
| [
"[email protected]"
] | |
e3d5ef7c2bf734988331695db571c4c9fba3f293 | a7744d27342514682189ff5fac7f198b380d1997 | /robonova/kinematics/kinelib/dh.py | 83496565a6147db910fb662d666da5fa958f5cd6 | [] | no_license | roboshepherd/myro-epuck | b6469cb2c30b50d625068eb9e306b8ac2cbe4fad | 564b7f2a7d262b11d8adc86b5f5efb1b825aef53 | refs/heads/master | 2020-05-17T00:21:56.339519 | 2010-03-16T16:36:26 | 2010-03-16T16:36:26 | 565,242 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | #DH Matrix representation of manipulator kinematics
#
# The original robot toolbox functions used a DH matrix to describes the
# kinematics of a manipulator in a general way.
#
# For an n-axis manipulator, DH is an nx4 or nx5 matrix, whose rows
# comprise
#
# 1 alpha link twist angle
# 2 A link length
# 3 theta link rotation angle
# 4 D link offset distance
# 5 sigma joint type, 0 for revolute, non-zero for prismatic
#
# If the last column is not given the manipulator is all-revolute.
#
# The first 5 columns of a DYN matrix contain the kinematic parameters
# and maybe used anywhere that a DH kinematic matrix is required -- the
# dynamic data is ignored.
#
# The functionality of the DH matrix has been replaced by the ROBOT object.
#
# See also: ROBOT, DYN.
# MOD.HISTORY
# 1/95 reverse labels on A & D
# $Log: dh.m,v $
# Revision 1.2 2002/04/01 11:47:11 pic
# General cleanup of code: help comments, see also, copyright, remnant dh/dyn
# references, clarification of functions.
#
# $Revision: 1407 $
# Copyright (C) 1993-2002, by Peter I. Corke
| [
"[email protected]"
] | |
193ce4e88456d4dcb2d636d9453aa8ed841f6d0e | 407490cf6e79695276236480bb8c00dd97a6b007 | /weekly_reports.py | 2f1432a1d63e51a0d84a222f554eabf428dd6a6d | [] | no_license | skreynolds/weekly_reports_dev | d17ac47c4642d946d294634152da77defbb2bd85 | cee840e62f21cdc69f6320f2ec0dc538743a1940 | refs/heads/master | 2020-04-06T16:43:45.733017 | 2018-11-16T02:27:27 | 2018-11-16T02:27:27 | 157,631,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,769 | py | #!/usr/bin/python
import psycopg2
from utils.config import config
from utils.file_conversion import *
from xref.tables import *
from xref.fixed_widths import *
# Specify the current year the routine is running
DATE = 2018
if __name__ == '__main__':
#############################################################
# RUN VALIDATION TO ENSURE SUCCESSFUL EXECUTION
#############################################################
# SECTION OF CODE SHOULD CHECK FOR ESSENTIAL FILES AND ESSENTIAL
# CONDITIONS TO ENSURE THAT THE SCRIPT WILL SUCCESSFULLY EXECUTE
# Connect to the PostgreSQL database server
conn = None
try:
#############################################################
# PROCESS FIXED WIDTH FILES OUTPUT FROM CALLISTA
#############################################################
# File path for VET_weekly_AHC_YYYY.txt
file = './data/VET_weekly_AHC_' + str(DATE) + '.txt'
# Execute file conversion script
convert_fw_to_csv_AHC(file, weekly_ahc_slices)
# File path for VET_weekly_AHC_[YYYY-1].txt
file = './data/VET_weekly_AHC_' + str(DATE-1) + '.txt'
# Execute file conversion script
convert_fw_to_csv_AHC(file, weekly_ahc_slices)
# File path for VET_Course_Completions_2018.txt
file = './data/VET_Course_Completions_' + str(DATE) + '.txt'
# Execute file conversion script
convert_fw_to_csv_completions(file, completions_slices)
# File path for VET_2018_Apprentice.txt
file = './data/VET_2018_Apprentice.txt'
# Execute file conversion script
convert_fw_to_csv_apprentices(file, apprentices_slices)
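        # Illustrative sketch only -- the real conversion lives in
        # utils.file_conversion and the names below are assumptions. A
        # "slices" spec is taken to be an iterable of (start, stop) column
        # offsets, so one fixed-width line maps to CSV fields roughly like:
        #
        #     def fw_line_to_fields(line, slices):
        #         return [line[start:stop].strip() for start, stop in slices]
        #
        #     fw_line_to_fields("AB  123", [(0, 4), (4, 7)])  # -> ["AB", "123"]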
#############################################################
# CONNECT TO THE WEEKLY REPORTS DATABASE
#############################################################
# read connection parameters
params = config()
# connect to the PostgreSQL server
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
#############################################################
# BUILD XLOOKUP
#############################################################
# create all of the xlookup tables
for table_name in xlookup_tables.keys():
# create table
cur.execute(xlookup_tables[table_name])
# define the file path for loading the data
csv_path = 'C:\\Users\\sreynolds2\\Documents\\dev\\weekly_reports_dev\\data\\xlookup\\' + table_name + '.txt'
# load the data into the xlookup table
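            # psycopg2's cursor.copy_expert() streams the open file through
            # COPY ... FROM STDIN, far faster than row-by-row INSERTs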
copy_sql = """
COPY """ + table_name + """ FROM stdin WITH DELIMITER as ',' CSV QUOTE as '"'
"""
with open(csv_path, 'r') as f:
cur.copy_expert(sql=copy_sql, file=f)
#############################################################
# BUILD TABLES
#############################################################
# create errors_for_correction_by_vet_teams table
cur.execute(tables['xref_error_for_correction_template'].format('errors_for_correction_by_vet_teams'))
# create errors_for_correction_by_vet_stats_officer table
cur.execute(tables['xref_error_for_correction_template'].format('errors_for_correction_by_vet_stats_officer'))
        # create errors_for_correction_by_vet_teams_course_intention table
cur.execute(tables['xref_error_for_correction_template'].format('errors_for_correction_by_vet_teams_course_intention'))
# create vet_course_completion_YYYY table
cur.execute(tables['xref_vet_course_completions_YYYY_template'].format('vet_course_completions_' + str(DATE)))
# create weekly_current table - (VET_weekly_AHC_YYYY is imported later)
cur.execute(tables['xref_weekly_template'].format('current'))
        # create weekly_[YYYY-1] table - (VET_weekly_AHC_[YYYY-1] is imported later)
cur.execute(tables['xref_weekly_template'].format(str(DATE-1)))
# create vet_course_completions table - (VET_Course_Completions_YYYY imported later)
cur.execute(tables['xref_course_completions'])
# create vet_apprentices table - (VET_YYYY_Apprentice is imported later)
cur.execute(tables['xref_vet_apprentice_template'])
# create student table
cur.execute(tables['xref_student_template'])
# create student_course_attempt table
cur.execute(tables['xref_student_course_attempt_template'])
# create student_unit_attempt table
cur.execute(tables['xref_student_unit_attempt_template'])
# create activity_pattern_trend table
cur.execute(tables['xref_activity_pattern_trend_template'])
# create unresulted_sua_2017 table
cur.execute(tables['xref_unresulted_sua_template'].format(str(DATE-1)))
# create team_activity table
cur.execute(tables['xref_team_activity_template'])
# create apprentice_sua table
cur.execute(tables['xref_apprentice_sua_template'])
# create apprentice_course table
cur.execute(tables['xref_apprentice_course_template'])
# create xref_vfh_unit_tp table
cur.execute(tables['xref_vfh_unit_tp_template'])
#############################################################
# IMPORT DATA TO TABLES
#############################################################
############################################################
# Import the VET_weekly_AHC_YYYY.csv
csv_path = 'C:\\Users\\sreynolds2\\Documents\\dev\\weekly_reports_dev\\data\\VET_weekly_AHC_' + str(DATE) + '.csv'
copy_sql = """
COPY weekly_current FROM stdin WITH DELIMITER as ',' CSV QUOTE as '"'
"""
with open(csv_path, 'r') as f:
cur.copy_expert(sql=copy_sql, file=f)
############################################################
# Import the VET_weekly_AHC_[YYYY-1].csv
csv_path = 'C:\\Users\\sreynolds2\\Documents\\dev\\weekly_reports_dev\\data\\VET_weekly_AHC_' + str(DATE-1) + '.csv'
copy_sql = """
COPY weekly_""" + str(DATE-1) + """ FROM stdin WITH DELIMITER as ',' CSV QUOTE as '"'
"""
with open(csv_path, 'r') as f:
cur.copy_expert(sql=copy_sql, file=f)
############################################################
# Import the VET_Course_Completions_YYYY.csv
csv_path = 'C:\\Users\\sreynolds2\\Documents\\dev\\weekly_reports_dev\\data\\VET_Course_Completions_' + str(DATE) + '.csv'
copy_sql = """
COPY vet_course_completions FROM stdin WITH DELIMITER as ',' CSV QUOTE as '"'
"""
with open(csv_path, 'r') as f:
cur.copy_expert(sql=copy_sql, file=f)
############################################################
# Import the VET_2018_Apprentice.csv
csv_path = 'C:\\Users\\sreynolds2\\Documents\\dev\\weekly_reports_dev\\data\\VET_' + str(DATE) + '_Apprentice.csv'
copy_sql = """
COPY vet_apprentice FROM stdin WITH DELIMITER as ',' CSV QUOTE as '"'
"""
with open(csv_path, 'r') as f:
cur.copy_expert(sql=copy_sql, file=f)
############################################################
# Import the xref_vfh_unit_tp.csv
csv_path = 'C:\\Users\\sreynolds2\\Documents\\dev\\weekly_reports_dev\\data\\xlookup\\xref_vfh_unit_tp.txt'
copy_sql = """
COPY xref_vfh_unit_tp FROM stdin WITH DELIMITER as ',' CSV QUOTE as '"'
"""
with open(csv_path, 'r') as f:
cur.copy_expert(sql=copy_sql, file=f)
#############################################################
# RUN QUERIES TO BUILD REPORTS AND ERROR TABLES
#############################################################
#############################################################
# EXPORT DATA
#############################################################
#############################################################
# CLOSE THE DATABASE
#############################################################
# close the communication with the PostgreSQL
cur.close()
# commit the changes
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.') | [
"[email protected]"
] | |
0b2b34071fffde256b2760d76923b04e0df0a9a3 | 02dc1f70da529c7c2aa45dcfe5e0a3aeeb1f98cc | /src/063_unique_paths_ii/063_unique_paths_ii.py | d86011baf7cc85f588f4a5609c367411ac6c3ac4 | [] | no_license | ypliu/leetcode-python | 2a5a14de6310cae19b9cc42091d81586e697fffb | 13e61c13c406a73debcfc996937cf16f715d55d1 | refs/heads/master | 2020-03-27T06:17:32.303442 | 2019-02-24T14:50:11 | 2019-02-24T14:50:11 | 146,094,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | class Solution(object):
def uniquePathsWithObstacles(self, obstacleGrid):
"""
:type obstacleGrid: List[List[int]]
:rtype: int
"""
if not obstacleGrid or 0 == len(obstacleGrid) or 0 == len(obstacleGrid[0]) or 0 != obstacleGrid[0][0]:
return 0
m, n = len(obstacleGrid), len(obstacleGrid[0])
res = [0 for _ in xrange(n)]
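        # Rolling-array DP: res[j] counts the paths from cell (i, j) to the
        # bottom-right corner. The grid is swept from the last row upward, so
        # a single 1-D array suffices; res[j] += res[j+1] adds the "move
        # right" count to the "move down" count, and an obstacle zeroes it.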
for j in xrange(n-1, -1, -1):
if 0 == obstacleGrid[m-1][j]:
res[j] = 1
elif 1 == obstacleGrid[m-1][j]:
break
else:
                print 'Illegal value: %d at (%d, %d).' %(obstacleGrid[m-1][j], (m-1), j)
return
for i in xrange(m-2, -1, -1):
if 1 == obstacleGrid[i][-1]:
res[-1] = 0
for j in xrange(n-2, -1, -1):
val = obstacleGrid[i][j]
if 1 == val:
res[j] = 0
elif 0 == val:
res[j] += res[j+1]
else:
                    print 'Illegal value: %d at (%d, %d).' %(val, i, j)
return
return res[0]
# debug
s = Solution()
print s.uniquePathsWithObstacles([ [0,0,0], [0,1,0], [0,0,0] ])
print s.uniquePathsWithObstacles([ [0,0,0], [0,1,0], [0,2,0] ])
| [
"[email protected]"
] | |
01555860ba4b5b854365fb301034b8d6369a242b | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /third_party/webrtc/tools/internal_tools.gyp | bee8a8bf0e2186703fd1d761f9891e99b7d53ecf | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-google-patent-license-webrtc",
"LicenseRef-scancode-takuya-ooura",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"MS-LPL",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 842 | gyp | # Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# This file is used for internal tools used by the WebRTC code only.
{
'includes': [
'../build/common.gypi',
],
'targets': [
{
'target_name': 'command_line_parser',
'type': 'static_library',
'sources': [
'simple_command_line_parser.h',
'simple_command_line_parser.cc',
],
'dependencies': [
'<(webrtc_root)/base/base.gyp:gtest_prod',
],
}, # command_line_parser
],
}
| [
"[email protected]"
] | |
21fdc4b381f624fa34e85401ed931bd31dd672a9 | 1e6871bc3bc87b67b1a18a0c69f17d901e1a1a7f | /tests/test_api_v1.py | 285f2da27d336deb7e49dec7ffd00bb72e7ee108 | [
"Apache-2.0"
] | permissive | fabric8-analytics/fabric8-analytics-jobs | 24ada2f21c728840df935be792c744839535e094 | 545b932a0eb4acac5f04753010dca446b0425a6a | refs/heads/master | 2023-04-20T19:53:42.321637 | 2023-03-23T12:14:18 | 2023-03-23T12:14:18 | 89,703,953 | 6 | 29 | Apache-2.0 | 2023-03-23T12:14:20 | 2017-04-28T12:27:05 | Python | UTF-8 | Python | false | false | 483 | py | """Tests for the module 'api_v1'."""
# TODO enable when new test(s) will be added
# from f8a_jobs.api_v1 import *
class TestApiV1Functions(object):
"""Tests for the module 'api_v1'."""
def setup_method(self, method):
"""Set up any state tied to the execution of the given method in a class."""
assert method
def teardown_method(self, method):
"""Teardown any state that was previously setup with a setup_method call."""
assert method
| [
"[email protected]"
] | |
b56c74b0a911abe2b46939ff7fcbdd05407bb9e3 | 00414b9d72c922b873cc2ebcb4d1ce068de5007f | /src/backend/partaj/core/migrations/0016_add_referral_activity.py | d7cf100798aec83db3fde67ef48c14dd605cf6a7 | [
"MIT"
] | permissive | MTES-MCT/partaj | 1de9691dc6e7615c1d228a0e39c9208b97222dab | 22e4afa728a851bb4c2479fbb6f5944a75984b9b | refs/heads/main | 2023-08-07T08:22:30.290701 | 2023-08-04T16:57:38 | 2023-08-04T17:22:26 | 237,007,942 | 4 | 3 | MIT | 2023-09-14T19:10:26 | 2020-01-29T14:54:46 | Python | UTF-8 | Python | false | false | 3,466 | py | # Generated by Django 3.0.5 on 2020-05-20 13:05
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("core", "0015_add_referral_answer"),
]
operations = [
migrations.CreateModel(
name="ReferralActivity",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
help_text="Primary key for the referral activity as UUID",
primary_key=True,
serialize=False,
verbose_name="id",
),
),
(
"created_at",
models.DateTimeField(auto_now_add=True, verbose_name="created at"),
),
(
"verb",
models.CharField(
choices=[
("assigned", "assigned"),
("answered", "answered"),
("created", "created"),
],
help_text="Verb expressing the action this activity represents",
max_length=50,
verbose_name="verb",
),
),
(
"item_object_id",
models.CharField(
blank=True,
help_text="ID of the linked item",
max_length=255,
verbose_name="item object id",
),
),
(
"actor",
models.ForeignKey(
blank=True,
help_text="User who generated this activity",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="referral_activity",
related_query_name="referral_activity",
to=settings.AUTH_USER_MODEL,
verbose_name="actor",
),
),
(
"item_content_type",
models.ForeignKey(
blank=True,
help_text="Model for the linked item",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="contenttypes.ContentType",
verbose_name="item content type",
),
),
(
"referral",
models.ForeignKey(
help_text="Referral on which the activity took place",
on_delete=django.db.models.deletion.CASCADE,
related_name="activity",
related_query_name="activity",
to="core.Referral",
verbose_name="referral",
),
),
],
),
]
| [
"[email protected]"
] | |
334590c3e97d988d696edd6ac40aef16b17a0f72 | 29a04fe1221acab7b3d799a4c25db8fd8c141995 | /reagent/training/world_model/compress_model_trainer.py | 7adc1b03ff2630541e6440d402d5e0bc505b1449 | [
"BSD-3-Clause"
] | permissive | jaynotleno/ReAgent | fb588656890ac9d2b19618528ae21bb750a6eaa6 | acb98f8de7a5604487cd921545b631fdd2541021 | refs/heads/master | 2023-04-18T05:45:24.823728 | 2021-04-22T06:35:01 | 2021-04-22T06:35:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,228 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.parameters import Seq2RewardTrainerParameters
from reagent.models.fully_connected_network import FullyConnectedNetwork
from reagent.models.seq2reward_model import Seq2RewardNetwork
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.utils import gen_permutations
from reagent.training.world_model.seq2reward_trainer import get_Q
logger = logging.getLogger(__name__)
class CompressModelTrainer(ReAgentLightningModule):
""" Trainer for fitting Seq2Reward planning outcomes to a neural network-based policy """
def __init__(
self,
compress_model_network: FullyConnectedNetwork,
seq2reward_network: Seq2RewardNetwork,
params: Seq2RewardTrainerParameters,
):
super().__init__()
self.compress_model_network = compress_model_network
self.seq2reward_network = seq2reward_network
self.params = params
# permutations used to do planning
self.all_permut = gen_permutations(
params.multi_steps, len(self.params.action_names)
)
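        # `all_permut` enumerates every possible discrete action sequence of
        # length `multi_steps`, which planning then scores via get_Q.
        # Illustrative sketch only (the real helper is
        # reagent.training.utils.gen_permutations; its exact tensor layout is
        # not assumed here):
        #
        #     from itertools import product
        #     seqs = list(product(range(num_actions), repeat=multi_steps))
        #     # e.g. 2 actions, 3 steps -> 8 sequences: (0,0,0), (0,0,1), ...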
def configure_optimizers(self):
optimizers = []
optimizers.append(
torch.optim.Adam(
self.compress_model_network.parameters(),
lr=self.params.compress_model_learning_rate,
)
)
return optimizers
def train_step_gen(self, training_batch: rlt.MemoryNetworkInput, batch_idx: int):
loss, accuracy = self.get_loss(training_batch)
detached_loss = loss.cpu().detach().item()
accuracy = accuracy.item()
logger.info(
f"Seq2Reward Compress trainer MSE/Accuracy: {detached_loss}, {accuracy}"
)
self.reporter.log(mse_loss=detached_loss, accuracy=accuracy)
yield loss
# pyre-ignore inconsistent override because lightning doesn't use types
def validation_step(self, batch: rlt.MemoryNetworkInput, batch_idx: int):
mse, acc = self.get_loss(batch)
detached_loss = mse.cpu().detach().item()
acc = acc.item()
state_first_step = batch.state.float_features[0]
# shape: batch_size, action_dim
q_values_all_action_all_data = (
self.compress_model_network(state_first_step).cpu().detach()
)
q_values = q_values_all_action_all_data.mean(0).tolist()
action_distribution = torch.bincount(
torch.argmax(q_values_all_action_all_data, dim=1),
minlength=len(self.params.action_names),
)
# normalize
action_distribution = (
action_distribution.float() / torch.sum(action_distribution)
).tolist()
self.reporter.log(
eval_mse_loss=detached_loss,
eval_accuracy=acc,
eval_q_values=[q_values],
eval_action_distribution=[action_distribution],
)
return (detached_loss, q_values, action_distribution, acc)
def get_loss(self, batch: rlt.MemoryNetworkInput):
# shape: batch_size, num_action
compress_model_output = self.compress_model_network(
batch.state.float_features[0]
)
state_first_step = batch.state.float_features[0]
target = get_Q(
self.seq2reward_network,
state_first_step,
self.all_permut,
)
assert (
compress_model_output.size() == target.size()
), f"{compress_model_output.size()}!={target.size()}"
mse = F.mse_loss(compress_model_output, target)
with torch.no_grad():
# pyre-fixme[16]: `Tuple` has no attribute `indices`.
target_action = torch.max(target, dim=1).indices
model_action = torch.max(compress_model_output, dim=1).indices
accuracy = torch.mean((target_action == model_action).float())
return mse, accuracy
def warm_start_components(self):
logger.info("No warm start components yet...")
components = []
return components
| [
"[email protected]"
] | |
4c07dcbfb8366a5624a377756e0b8e8daa6b8ed4 | 0018a3b0429ce07f026534b61274e8c5fa25a0ef | /tests/test_crawler.py | 2896f6fe151f9a66353f6aa3029c8bac7b4ce43e | [] | no_license | carlosmaniero/cptm_analytics | ee184bf44efc0170c11261323f8214041d594d53 | b1e3d1da081e429a48ce9b8fb4e9d56efd1217d5 | refs/heads/master | 2021-01-01T04:33:42.996511 | 2016-05-24T20:43:30 | 2016-05-24T20:43:30 | 59,150,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,542 | py | import pytest
from tornado import gen
from crawler.crawler import Crawler
from crawler.data import CrawlerDataControl
from crawler.tasks import CrawlerTasks
from tests import setup_module # NOQA
def test_download_data():
'''
    Run a single test that downloads the CPTM data and checks the
    status_code returned by Crawler.download_data().
'''
crawler = Crawler()
response = crawler.download_data()
assert response['status_code'] == 200
def test_parse_content():
'''
    Test the Crawler.parse_content() method. This checks that every line
    from Crawler.LINES is present in the parse_content() return value.
'''
crawler = Crawler()
response = crawler.download_data()
parsed = crawler.parse_content(response['content'])
for line in Crawler.LINES:
assert line in parsed
@pytest.mark.gen_test
def test_download_task():
'''
Test the CrawlerTask.task_download_data() this will run the task, and
check if this work comparing if the total of responses of the database is
increased before 3 seconds.
'''
tasks = CrawlerTasks()
data = CrawlerDataControl()
# Check for downloaded data
total_downloaded = yield data.count_response()
# Running downloads task
tasks.task_download_data()
# Wait for 2 seconds
yield gen.sleep(2)
    # Check for downloaded data
new_total = yield data.count_response()
# Assert if the crawler works
assert new_total > total_downloaded
@pytest.mark.gen_test
def test_process_task():
'''
    This test calls CrawlerTasks.task_process_data() and checks that the
    response collection is decreased while the processed collection is
    increased.
'''
tasks = CrawlerTasks()
data = CrawlerDataControl()
# Check for downloaded data
total_downloaded = yield data.count_response()
# Check if no responses found
if total_downloaded == 0:
# Running downloads task
task_download = tasks.task_download_data()
total = 0
while total_downloaded == 0:
total += 1
total_downloaded = yield data.count_response()
# Wait for 3 seconds to get a response
# If this fails check for CPTM conection
assert total <= 3
yield gen.sleep(1)
# Stop the task_download
task_download.cancel()
total_downloaded = yield data.count_response()
# Check total processed in the database
total_processed = yield data.count_processed()
# Start processing task
task_process = tasks.task_process_data()
total = 3
new_total_downloaded = total_downloaded
while new_total_downloaded == total_downloaded:
# Wait 3 seconds to process the response
yield gen.sleep(1)
new_total_downloaded = yield data.count_response()
if total_downloaded == new_total_downloaded:
total -= 1
assert total > 0
else:
# Stop task_process
task_process.cancel()
# Check the total in downloaded queue
new_total_downloaded = yield data.count_response()
# Check the total processed
new_total_processed = yield data.count_processed()
# Calculate the total removed from the downloaded queue
processed = total_downloaded - new_total_downloaded
# Check if the total processed is increased
# in the processed collection
assert total_processed == new_total_processed - processed
| [
"[email protected]"
] | |
18421b654dce2b71a36d0440ba6ced2729a412e6 | 659a8da3331f50e91578d48d47210abd5e88364b | /samples/BulkAQ.py | bb86c79f88772cb5139d6a726ba12381ac06b694 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ramboma/python-cx_Oracle | e15a073848ca273cf2d36c0be3ed2da30681a8cf | 3ab21c944705802b7433a5e3916143fe10af2b3f | refs/heads/master | 2020-05-23T23:57:16.166097 | 2019-05-03T21:49:57 | 2019-05-03T21:49:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,280 | py | #------------------------------------------------------------------------------
# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
#
# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved.
#
# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta,
# Canada. All rights reserved.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# BulkAQ.py
# This script demonstrates how to use bulk enqueuing and dequeuing of
# messages with advanced queuing using cx_Oracle. It makes use of a RAW queue
# created in the sample setup.
#
# This script requires cx_Oracle 7.2 and higher.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import SampleEnv
QUEUE_NAME = "DEMORAW"
PAYLOAD_DATA = [
"The first message",
"The second message",
"The third message",
"The fourth message",
"The fifth message",
"The sixth message",
"The seventh message",
"The eighth message",
"The ninth message",
"The tenth message",
"The eleventh message",
"The twelfth and final message"
]
# connect to database
connection = cx_Oracle.connect(SampleEnv.GetMainConnectString())
cursor = connection.cursor()
# create queue
queue = connection.queue(QUEUE_NAME)
queue.deqOptions.wait = cx_Oracle.DEQ_NO_WAIT
queue.deqOptions.navigation = cx_Oracle.DEQ_FIRST_MSG
# dequeue all existing messages to ensure the queue is empty, just so that
# the results are consistent
while queue.deqOne():
pass
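# (DEQ_NO_WAIT above makes deqOne() return None once the queue is empty,
# so this drain loop terminates instead of blocking)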
# enqueue a few messages
print("Enqueuing messages...")
batchSize = 6
dataToEnq = PAYLOAD_DATA
while dataToEnq:
batchData = dataToEnq[:batchSize]
dataToEnq = dataToEnq[batchSize:]
messages = [connection.msgproperties(payload=d) for d in batchData]
for data in batchData:
print(data)
queue.enqMany(messages)
connection.commit()
# dequeue the messages
print("\nDequeuing messages...")
batchSize = 8
while True:
messages = queue.deqMany(batchSize)
if not messages:
break
for props in messages:
print(props.payload.decode())
connection.commit()
print("\nDone.")
| [
"[email protected]"
] | |
7f80ab0319cfb31d50e0c4989e81ffc1f9d987c3 | 5a07e1afa5d172dcd4288f12636edd9c53148073 | /tests/test_calculator.py | 97b5fa4e3d9da0bf9e1d033cbced933a20b95265 | [
"Apache-2.0"
] | permissive | scikit-hep/pyhf | 3df3f9b12d1b362919629275b8746060833713f3 | 205eecfb0b57591eb6b70e98b01511797340a0c7 | refs/heads/main | 2023-09-02T18:50:35.990103 | 2023-08-31T00:10:41 | 2023-08-31T00:10:41 | 118,789,569 | 246 | 82 | Apache-2.0 | 2023-09-13T21:57:02 | 2018-01-24T16:14:39 | Python | UTF-8 | Python | false | false | 3,471 | py | import pytest
import pyhf
import pyhf.infer.calculators
def test_calc_dist():
asymptotic_dist = pyhf.infer.calculators.AsymptoticTestStatDistribution(0.0)
assert asymptotic_dist.pvalue(-1) == 1 - asymptotic_dist.cdf(-1)
@pytest.mark.parametrize("return_fitted_pars", [False, True])
def test_generate_asimov_can_return_fitted_pars(return_fitted_pars):
model = pyhf.simplemodels.uncorrelated_background([1, 1], [1, 1], [1, 1])
data = [2, 2, 1, 1] # [main x 2, aux x 2]
init_pars = model.config.suggested_init()
par_bounds = model.config.suggested_bounds()
fixed_params = model.config.suggested_fixed()
result = pyhf.infer.calculators.generate_asimov_data(
1.0,
data,
model,
init_pars,
par_bounds,
fixed_params,
return_fitted_pars=return_fitted_pars,
)
if return_fitted_pars:
assert len(result) == 2
result, asimov_pars = result
assert pytest.approx([1.0, 1.0, 1.0]) == pyhf.tensorlib.tolist(asimov_pars)
assert pytest.approx([2.0, 2.0, 1.0, 1.0]) == pyhf.tensorlib.tolist(result)
# test different test stats because those affect the control flow
# in AsymptotiCalculator.teststatistic, where the fit results should be set
# the other kwargs don't impact the logic of that method,
# so leave them at the default so as not to put a burden on future changes
@pytest.mark.parametrize('test_stat', ['qtilde', 'q', 'q0'])
def test_asymptotic_calculator_has_fitted_pars(test_stat):
model = pyhf.simplemodels.uncorrelated_background([1], [1], [1])
data = [2, 1] # [main, aux]
calc = pyhf.infer.calculators.AsymptoticCalculator(data, model, test_stat=test_stat)
calc.teststatistic(0 if test_stat == 'q0' else 1)
assert hasattr(calc, 'fitted_pars')
fitted_pars = calc.fitted_pars
assert hasattr(fitted_pars, 'asimov_pars')
assert hasattr(fitted_pars, 'fixed_poi_fit_to_data')
assert hasattr(fitted_pars, 'fixed_poi_fit_to_asimov')
assert hasattr(fitted_pars, 'free_fit_to_data')
assert hasattr(fitted_pars, 'free_fit_to_asimov')
rtol = 1e-5
if test_stat == 'q0':
assert pytest.approx([1.0, 1.0], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.asimov_pars
)
assert pytest.approx([0.0, 1.5], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.fixed_poi_fit_to_data
)
assert pytest.approx([0.0, 1.5], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.fixed_poi_fit_to_asimov
)
assert pytest.approx([1.0, 1.0], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.free_fit_to_data
)
assert pytest.approx([1.0, 1.0], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.free_fit_to_asimov
)
else:
assert pytest.approx([0.0, 1.5], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.asimov_pars
)
assert pytest.approx([1.0, 1.0], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.fixed_poi_fit_to_data
)
assert pytest.approx([1.0, 1.1513553], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.fixed_poi_fit_to_asimov
)
assert pytest.approx([1.0, 1.0], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.free_fit_to_data
)
assert pytest.approx(
[7.6470499e-05, 1.4997178], rel=rtol
) == pyhf.tensorlib.tolist(fitted_pars.free_fit_to_asimov)
| [
"[email protected]"
] | |
0e6826f5eca7265ecfe8da2ea0bfe62ddf51bdf4 | e6ea71d6acbb41bd40d3a17b352e19c6369d5c4b | /senpai/sticker.py | 52dad769df6f26168e74249728669b3127b54c5f | [
"MIT"
] | permissive | alexyy802/waifucord | bbfb50515ca23bf711e940ac8921092ff6d1e12e | c3bb883a6a148effb127781a885e839697df6a8b | refs/heads/master | 2023-09-02T13:19:44.478472 | 2021-10-29T06:51:51 | 2021-10-29T06:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,135 | py | """
The MIT License (MIT)
Copyright (c) 2021-present waifucord
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Literal, TYPE_CHECKING, List, Optional, Tuple, Type, Union
import unicodedata
from .mixins import Hashable
from .asset import Asset, AssetMixin
from .utils import cached_slot_property, find, snowflake_time, get, MISSING
from .errors import InvalidData
from .enums import StickerType, StickerFormatType, try_enum
__all__ = (
"StickerPack",
"StickerItem",
"Sticker",
"StandardSticker",
"GuildSticker",
)
if TYPE_CHECKING:
import datetime
from .state import ConnectionState
from .user import User
from .guild import Guild
from .chan.sticker import (
StickerPack as StickerPackPayload,
StickerItem as StickerItemPayload,
Sticker as StickerPayload,
StandardSticker as StandardStickerPayload,
GuildSticker as GuildStickerPayload,
ListPremiumStickerPacks as ListPremiumStickerPacksPayload,
EditGuildSticker,
)
class StickerPack(Hashable):
"""Represents a sticker pack.
.. versionadded:: 2.0
.. container:: operations
.. describe:: str(x)
Returns the name of the sticker pack.
.. describe:: x == y
Checks if the sticker pack is equal to another sticker pack.
.. describe:: x != y
Checks if the sticker pack is not equal to another sticker pack.
Attributes
-----------
name: :class:`str`
The name of the sticker pack.
description: :class:`str`
The description of the sticker pack.
id: :class:`int`
The id of the sticker pack.
stickers: List[:class:`StandardSticker`]
The stickers of this sticker pack.
sku_id: :class:`int`
The SKU ID of the sticker pack.
cover_sticker_id: :class:`int`
The ID of the sticker used for the cover of the sticker pack.
cover_sticker: :class:`StandardSticker`
The sticker used for the cover of the sticker pack.
"""
__slots__ = (
"_state",
"id",
"stickers",
"name",
"sku_id",
"cover_sticker_id",
"cover_sticker",
"description",
"_banner",
)
def __init__(self, *, state: ConnectionState, data: StickerPackPayload) -> None:
self._state: ConnectionState = state
self._from_data(data)
def _from_data(self, data: StickerPackPayload) -> None:
self.id: int = int(data["id"])
stickers = data["stickers"]
self.stickers: List[StandardSticker] = [
StandardSticker(state=self._state, data=sticker) for sticker in stickers
]
self.name: str = data["name"]
self.sku_id: int = int(data["sku_id"])
self.cover_sticker_id: int = int(data["cover_sticker_id"])
self.cover_sticker: StandardSticker = get(self.stickers, id=self.cover_sticker_id) # type: ignore
self.description: str = data["description"]
self._banner: int = int(data["banner_asset_id"])
@property
def banner(self) -> Asset:
""":class:`Asset`: The banner asset of the sticker pack."""
return Asset._from_sticker_banner(self._state, self._banner)
def __repr__(self) -> str:
return f"<StickerPack id={self.id} name={self.name!r} description={self.description!r}>"
def __str__(self) -> str:
return self.name
class _StickerTag(Hashable, AssetMixin):
__slots__ = ()
id: int
format: StickerFormatType
async def read(self) -> bytes:
"""|coro|
Retrieves the content of this sticker as a :class:`bytes` object.
.. note::
Stickers that use the :attr:`StickerFormatType.lottie` format cannot be read.
Raises
------
HTTPException
Downloading the asset failed.
NotFound
The asset was deleted.
TypeError
The sticker is a lottie type.
Returns
-------
:class:`bytes`
The content of the asset.
"""
if self.format is StickerFormatType.lottie:
raise TypeError('Cannot read stickers of format "lottie".')
return await super().read()
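# A hedged usage sketch (not from the library docs): inside a coroutine,
#
#     data = await sticker.read()
#
# yields the raw asset bytes for non-lottie formats, while lottie-format
# stickers raise TypeError as documented above.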
class StickerItem(_StickerTag):
"""Represents a sticker item.
.. versionadded:: 2.0
.. container:: operations
.. describe:: str(x)
Returns the name of the sticker item.
.. describe:: x == y
Checks if the sticker item is equal to another sticker item.
.. describe:: x != y
Checks if the sticker item is not equal to another sticker item.
Attributes
-----------
name: :class:`str`
The sticker's name.
id: :class:`int`
The id of the sticker.
format: :class:`StickerFormatType`
The format for the sticker's image.
url: :class:`str`
The URL for the sticker's image.
"""
__slots__ = ("_state", "name", "id", "format", "url")
def __init__(self, *, state: ConnectionState, data: StickerItemPayload):
self._state: ConnectionState = state
self.name: str = data["name"]
self.id: int = int(data["id"])
self.format: StickerFormatType = try_enum(
StickerFormatType, data["format_type"]
)
self.url: str = f"{Asset.BASE}/stickers/{self.id}.{self.format.file_extension}"
def __repr__(self) -> str:
return f"<StickerItem id={self.id} name={self.name!r} format={self.format}>"
def __str__(self) -> str:
return self.name
async def fetch(self) -> Union[Sticker, StandardSticker, GuildSticker]:
"""|coro|
Attempts to retrieve the full sticker data of the sticker item.
Raises
--------
HTTPException
Retrieving the sticker failed.
Returns
--------
Union[:class:`StandardSticker`, :class:`GuildSticker`]
The retrieved sticker.
"""
data: StickerPayload = await self._state.http.get_sticker(self.id)
cls, _ = _sticker_factory(data["type"]) # type: ignore
return cls(state=self._state, data=data)
class Sticker(_StickerTag):
"""Represents a sticker.
.. versionadded:: 1.6
.. container:: operations
.. describe:: str(x)
Returns the name of the sticker.
.. describe:: x == y
Checks if the sticker is equal to another sticker.
.. describe:: x != y
Checks if the sticker is not equal to another sticker.
Attributes
----------
name: :class:`str`
The sticker's name.
id: :class:`int`
The id of the sticker.
description: :class:`str`
The description of the sticker.
pack_id: :class:`int`
The id of the sticker's pack.
format: :class:`StickerFormatType`
The format for the sticker's image.
url: :class:`str`
The URL for the sticker's image.
"""
__slots__ = ("_state", "id", "name", "description", "format", "url")
def __init__(self, *, state: ConnectionState, data: StickerPayload) -> None:
self._state: ConnectionState = state
self._from_data(data)
def _from_data(self, data: StickerPayload) -> None:
self.id: int = int(data["id"])
self.name: str = data["name"]
self.description: str = data["description"]
self.format: StickerFormatType = try_enum(
StickerFormatType, data["format_type"]
)
self.url: str = f"{Asset.BASE}/stickers/{self.id}.{self.format.file_extension}"
def __repr__(self) -> str:
return f"<Sticker id={self.id} name={self.name!r}>"
def __str__(self) -> str:
return self.name
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the sticker's creation time in UTC."""
return snowflake_time(self.id)
class StandardSticker(Sticker):
"""Represents a sticker that is found in a standard sticker pack.
.. versionadded:: 2.0
.. container:: operations
.. describe:: str(x)
Returns the name of the sticker.
.. describe:: x == y
Checks if the sticker is equal to another sticker.
.. describe:: x != y
Checks if the sticker is not equal to another sticker.
Attributes
----------
name: :class:`str`
The sticker's name.
id: :class:`int`
The id of the sticker.
description: :class:`str`
The description of the sticker.
pack_id: :class:`int`
The id of the sticker's pack.
format: :class:`StickerFormatType`
The format for the sticker's image.
tags: List[:class:`str`]
A list of tags for the sticker.
sort_value: :class:`int`
The sticker's sort order within its pack.
"""
__slots__ = ("sort_value", "pack_id", "type", "tags")
def _from_data(self, data: StandardStickerPayload) -> None:
super()._from_data(data)
self.sort_value: int = data["sort_value"]
self.pack_id: int = int(data["pack_id"])
self.type: StickerType = StickerType.standard
try:
self.tags: List[str] = [tag.strip() for tag in data["tags"].split(",")]
except KeyError:
self.tags = []
def __repr__(self) -> str:
return (
f"<StandardSticker id={self.id} name={self.name!r} pack_id={self.pack_id}>"
)
async def pack(self) -> StickerPack:
"""|coro|
Retrieves the sticker pack that this sticker belongs to.
Raises
--------
InvalidData
The corresponding sticker pack was not found.
HTTPException
Retrieving the sticker pack failed.
Returns
--------
:class:`StickerPack`
The retrieved sticker pack.
"""
data: ListPremiumStickerPacksPayload = (
await self._state.http.list_premium_sticker_packs()
)
packs = data["sticker_packs"]
pack = find(lambda d: int(d["id"]) == self.pack_id, packs)
if pack:
return StickerPack(state=self._state, data=pack)
raise InvalidData(f"Could not find corresponding sticker pack for {self!r}")
class GuildSticker(Sticker):
"""Represents a sticker that belongs to a guild.
.. versionadded:: 2.0
.. container:: operations
.. describe:: str(x)
Returns the name of the sticker.
.. describe:: x == y
Checks if the sticker is equal to another sticker.
.. describe:: x != y
Checks if the sticker is not equal to another sticker.
Attributes
----------
name: :class:`str`
The sticker's name.
id: :class:`int`
The id of the sticker.
description: :class:`str`
The description of the sticker.
format: :class:`StickerFormatType`
The format for the sticker's image.
available: :class:`bool`
Whether this sticker is available for use.
guild_id: :class:`int`
The ID of the guild that this sticker is from.
user: Optional[:class:`User`]
The user that created this sticker. This can only be retrieved using :meth:`Guild.fetch_sticker` and
having the :attr:`~Permissions.manage_emojis_and_stickers` permission.
emoji: :class:`str`
The name of a unicode emoji that represents this sticker.
"""
__slots__ = ("available", "guild_id", "user", "emoji", "type", "_cs_guild")
def _from_data(self, data: GuildStickerPayload) -> None:
super()._from_data(data)
self.available: bool = data["available"]
self.guild_id: int = int(data["guild_id"])
user = data.get("user")
self.user: Optional[User] = self._state.store_user(user) if user else None
self.emoji: str = data["tags"]
self.type: StickerType = StickerType.guild
def __repr__(self) -> str:
return f"<GuildSticker name={self.name!r} id={self.id} guild_id={self.guild_id} user={self.user!r}>"
@cached_slot_property("_cs_guild")
def guild(self) -> Optional[Guild]:
"""Optional[:class:`Guild`]: The guild that this sticker is from.
        Could be ``None`` if the bot is not in the guild.
.. versionadded:: 2.0
"""
return self._state._get_guild(self.guild_id)
async def edit(
self,
*,
name: str = MISSING,
description: str = MISSING,
emoji: str = MISSING,
reason: Optional[str] = None,
) -> GuildSticker:
"""|coro|
Edits a :class:`GuildSticker` for the guild.
Parameters
-----------
name: :class:`str`
The sticker's new name. Must be at least 2 characters.
description: Optional[:class:`str`]
The sticker's new description. Can be ``None``.
emoji: :class:`str`
The name of a unicode emoji that represents the sticker's expression.
reason: :class:`str`
The reason for editing this sticker. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to edit stickers.
HTTPException
An error occurred editing the sticker.
Returns
--------
:class:`GuildSticker`
The newly modified sticker.
"""
payload: EditGuildSticker = {}
if name is not MISSING:
payload["name"] = name
if description is not MISSING:
payload["description"] = description
if emoji is not MISSING:
try:
emoji = unicodedata.name(emoji)
except TypeError:
pass
else:
emoji = emoji.replace(" ", "_")
payload["tags"] = emoji
data: GuildStickerPayload = await self._state.http.modify_guild_sticker(
self.guild_id, self.id, payload, reason
)
return GuildSticker(state=self._state, data=data)
async def delete(self, *, reason: Optional[str] = None) -> None:
"""|coro|
Deletes the custom :class:`Sticker` from the guild.
You must have :attr:`~Permissions.manage_emojis_and_stickers` permission to
do this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this sticker. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to delete stickers.
HTTPException
An error occurred deleting the sticker.
"""
await self._state.http.delete_guild_sticker(self.guild_id, self.id, reason)
def _sticker_factory(
sticker_type: Literal[1, 2]
) -> Tuple[Type[Union[StandardSticker, GuildSticker, Sticker]], StickerType]:
value = try_enum(StickerType, sticker_type)
if value == StickerType.standard:
return StandardSticker, value
elif value == StickerType.guild:
return GuildSticker, value
else:
return Sticker, value
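# Dispatch sketch (illustrative, mirroring StickerItem.fetch above): resolving
# a raw payload into the most specific class the factory knows about.
def _example_build_sticker(state: "ConnectionState", data: "StickerPayload") -> Sticker:
    cls, _ = _sticker_factory(data["type"])  # type: ignore
    return cls(state=state, data=data)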
| [
"[email protected]"
] | |
1210fa97dd02d25bcf6f192141e2e585187fb4d8 | d31d744f62c09cb298022f42bcaf9de03ad9791c | /runtime/mlir_tests/lit.cfg.py | d3cc5aa1abc19890ca261746033475c5e3deb1c0 | [
"Apache-2.0"
] | permissive | yuhuofei/TensorFlow-1 | b2085cb5c061aefe97e2e8f324b01d7d8e3f04a0 | 36eb6994d36674604973a06159e73187087f51c6 | refs/heads/master | 2023-02-22T13:57:28.886086 | 2021-01-26T14:18:18 | 2021-01-26T14:18:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | # Copyright 2020 The TensorFlow Runtime Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lit configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lit.formats
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
# pylint: disable=undefined-variable
# name: The name of this test suite.
config.name = 'TFRT'
# test_format: The test format to use to interpret tests.
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.mlir']
# test_source_root: The root path where tests are located.
config.test_source_root = config.tfrt_test_dir
# test_exec_root: The root path where tests should be run.
config.test_exec_root = config.runfile_srcdir
llvm_config.use_default_substitutions()
llvm_config.config.substitutions.append(
('%tfrt_bindir', 'tensorflow/compiler/aot'))
tool_dirs = config.tfrt_tools_dirs + [config.llvm_tools_dir]
tool_names = [
'bef_executor', 'bef_name', 'tfrt_translate', 'tfrt_opt',
'tfrt_gpu_translate', 'tfrt_gpu_opt', 'code_size_test_driver'
]
tools = [ToolSubst(s, unresolved='ignore') for s in tool_names]
llvm_config.add_tool_substitutions(tools, tool_dirs)
# pylint: enable=undefined-variable
| [
"[email protected]"
] | |
dcf6e27fe7dbce5fea81433a54d9c38b7c123a7d | 8ec8579880082783bb4efa30bc9ea1a3bc5b13c4 | /dpf/process/__init__.py | 7971abef57f19d66294b7b5d14132e4566fad487 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | chaselgrove/dpf | 06fd280ccd747d7fd8d3a1ac73a863860b726f74 | fac71dd744fecb07b88f73a5dea10fd87fe006be | refs/heads/master | 2021-03-19T13:07:06.167775 | 2018-05-22T17:28:38 | 2018-05-22T17:28:38 | 19,864,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,728 | py | # See file COPYING distributed with dpf for copyright and license.
import os
import traceback
import tempfile
import shutil
import wsgiref
import sqlite3
import json
import dpf
db_ddl = """CREATE TABLE job (id TEXT NOT NULL PRIMARY KEY,
process TEXT NOT NULL,
deleted BOOLEAN NOT NULL DEFAULT 0);"""
def sqlite_convert_boolean(i):
if i == '1':
return True
return False
def sqlite_adapt_boolean(b):
if b:
return 1
return 0
sqlite3.register_adapter(bool, sqlite_adapt_boolean)
sqlite3.register_converter('boolean', sqlite_convert_boolean)
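# Round-trip sketch (illustrative): with detect_types=sqlite3.PARSE_DECLTYPES,
# a column declared BOOLEAN passes through the two functions above -- True is
# stored as the integer 1 and converted back to True on SELECT:
#
#     db = sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES)
#     db.execute("CREATE TABLE t (flag BOOLEAN)")
#     db.execute("INSERT INTO t (flag) VALUES (?)", (True,))
#     db.execute("SELECT flag FROM t").fetchone()[0]  # -> True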
class Application(dpf.Application):
def __init__(self, base_dir, process_handlers):
self.base_dir = base_dir
self.process_handlers = {}
for (label, ph) in process_handlers.iteritems():
label = label.strip('/')
self.process_handlers[label] = ph
self.db_fname = os.path.join(self.base_dir, 'jobs.sqlite')
if not os.path.exists(self.db_fname):
db = sqlite3.connect(self.db_fname)
c = db.cursor()
c.execute(db_ddl)
c.close()
db.commit()
db.close()
return
def __call__(self, environ, start_response):
try:
path = environ['PATH_INFO']
if not path:
path = '/'
if path == '/':
(status, headers, oi) = self.handle_root(environ)
elif path.startswith('/job/'):
(status, headers, oi) = self.handle_job(environ)
else:
(status, headers, oi) = self.handle_process(environ)
start_response(status, headers)
return oi
except dpf.BaseHTTPError, exc:
status = exc.status
headers = exc.headers
output = exc.content
except:
traceback.print_exc()
status = '500 Internal Server Error'
output = 'A server error occurred. ' + \
'Please contact the administrator.\n'
headers = [('Content-Type', 'text/plain'),
('Content-Length', str(len(output)))]
start_response(status, headers)
return [output]
def handle_root(self, environ):
if environ['REQUEST_METHOD'] == 'GET':
mt = dpf.choose_media_type(dpf.get_accept(environ),
['text/plain', 'application/json'])
if mt == 'text/plain':
output = 'Available:\n'
for label in sorted(self.process_handlers):
ph = self.process_handlers[label]
output += ' %s: %s\n' % (label, ph.description)
else:
l = {}
for (label, ph) in self.process_handlers.iteritems():
l[label] = ph.description
output = json.dumps(l) + '\n'
headers = [('Content-Type', mt),
('Content-Length', str(len(output)))]
oi = [output]
return ('200 OK', headers, oi)
raise dpf.HTTP405MethodNotAllowed(['GET'])
def handle_process(self, environ):
path = environ['PATH_INFO']
try:
process_name = path.strip('/')
ph = self.process_handlers[process_name]
except KeyError:
raise dpf.HTTP404NotFound()
if environ['REQUEST_METHOD'] == 'GET':
(content_type, output) = ph.get_doc(dpf.get_accept(environ))
headers = [('Content-Type', content_type),
('Content-Length', str(len(output)))]
oi = [output]
return ('200 OK', headers, oi)
if environ['REQUEST_METHOD'] == 'POST':
try:
content_length = int(environ['CONTENT_LENGTH'])
except KeyError:
raise dpf.HTTP411LengthRequired()
except ValueError:
raise dpf.HTTP400BadRequest('text/plain',
'Bad content-length.\n')
if content_length < 0:
raise dpf.HTTP400BadRequest('text/plain',
'Bad content-length.\n')
if 'CONTENT_TYPE' not in environ:
raise dpf.HTTP400BadRequest('text/plain', 'No content-type.\n')
data = environ['wsgi.input'].read(content_length)
job_dir = tempfile.mkdtemp(prefix='', dir=self.base_dir)
try:
ident = os.path.basename(job_dir)
open(os.path.join(job_dir, 'data'), 'w').write(data)
if 'CONTENT_TYPE' in environ:
ct = environ['CONTENT_TYPE']
open(os.path.join(job_dir, 'content-type'), 'w').write(ct)
ph.launch(job_dir)
except:
shutil.rmtree(job_dir)
raise
self.register_job(ident, process_name)
app_uri = wsgiref.util.application_uri(environ).rstrip('/')
headers = [('Location', '%s/job/%s' % (app_uri, ident)),
('Content-Length', '0')]
return ('201 Created', headers, [''])
raise dpf.HTTP405MethodNotAllowed(['GET', 'POST'])
def handle_job(self, environ):
assert environ['PATH_INFO'].startswith('/job/')
ident = environ['PATH_INFO'][5:].split('/')[0]
try:
job_dict = self.get_job(ident)
except ValueError:
raise dpf.HTTP404NotFound()
if job_dict['deleted']:
raise dpf.HTTP410Gone()
ph = self.process_handlers[job_dict['process']]
job_dir = os.path.join(self.base_dir, ident)
job_url = '/job/%s' % ident
if environ['REQUEST_METHOD'] == 'GET':
accept = dpf.get_accept(environ)
if environ['PATH_INFO'] == job_url or \
environ['PATH_INFO'] == job_url+'/':
(content_type, output) = ph.info(accept, job_dir)
headers = [('Content-Type', content_type),
('Content-Length', str(len(output)))]
oi = [output]
return ('200 OK', headers, oi)
subpath = environ['PATH_INFO'][len(job_url)+1:]
(content_type, content) = ph.get_subpart(accept, job_dir, subpath)
headers = [('Content-Type', content_type),
('Content-Length', str(len(content)))]
return ('200 OK', headers, [content])
if environ['REQUEST_METHOD'] == 'DELETE':
if environ['PATH_INFO'] == job_url or \
environ['PATH_INFO'] == job_url+'/':
self.delete_job(ident)
ph.delete(job_dir)
shutil.rmtree(job_dir)
return ('204 No Content', [], [''])
subpath = environ['PATH_INFO'][len(job_url)+1:]
# we just use this to raise the 404 if the subpart doesn't exist;
# if it does...
ph.get_subpart(dpf.get_accept(environ), job_dir, subpath)
# ...fall through to method not allowed
raise dpf.HTTP405MethodNotAllowed(['GET'])
raise dpf.HTTP405MethodNotAllowed(['GET', 'DELETE'])
def register_job(self, ident, process):
db = sqlite3.connect(self.db_fname,
detect_types=sqlite3.PARSE_DECLTYPES)
try:
c = db.cursor()
c.execute("INSERT INTO job (id, process) VALUES (?, ?)",
(ident, process))
c.close()
db.commit()
finally:
db.close()
return
def get_job(self, ident):
db = sqlite3.connect(self.db_fname,
detect_types=sqlite3.PARSE_DECLTYPES)
try:
c = db.cursor()
c.execute("SELECT * FROM job WHERE id = ?", (ident, ))
cols = [ el[0] for el in c.description ]
row = c.fetchone()
if not row:
raise ValueError('no job %s in database' % ident)
d = dict(zip(cols, row))
c.close()
finally:
db.close()
return d
def delete_job(self, ident):
db = sqlite3.connect(self.db_fname,
detect_types=sqlite3.PARSE_DECLTYPES)
try:
c = db.cursor()
c.execute("UPDATE job SET deleted = ? WHERE id = ?", (True, ident))
if not c.rowcount:
raise ValueError('no job %s in database' % ident)
c.close()
db.commit()
finally:
db.close()
return
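# Serving sketch (illustrative; ``EchoHandler`` is hypothetical -- any object
# exposing description, launch(), info(), get_subpart() and delete() fits):
#
#     from wsgiref.simple_server import make_server
#     app = Application('/var/lib/dpf', {'echo': EchoHandler()})
#     make_server('', 8080, app).serve_forever()
#
# A POST to /echo then stores the request body under a fresh job directory and
# answers 201 with the job's Location; GET /job/<id> reports on it via the
# handler.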
# eof
| [
"[email protected]"
] | |
596451015e496db3243616f6d052da8d175442f8 | 752116ef4b69a3049fef0cfe9b3d212548cc81b1 | /sources/actions/profile/show.py | cd5a597e53b9b334781c5f88d5e5e68611ecb4f7 | [] | no_license | VDOMBoxGroup/runtime2.0 | e54af4af7a642f34b0e07b5d4096320494fb9ae8 | cb9932f5f75d5c6d7889f26d58aee079b4127299 | refs/heads/develop | 2023-07-07T11:06:10.817093 | 2023-07-03T06:11:55 | 2023-07-03T06:11:55 | 62,622,255 | 0 | 12 | null | 2023-05-23T02:55:00 | 2016-07-05T09:09:48 | Python | UTF-8 | Python | false | false | 4,565 | py |
import pstats
import re
from itertools import izip
import settings
import managers
import file_access
from utils.console import CONSOLE_WIDTH
from utils.tracing import BINARY_ALIAS, SERVER_ALIAS, TYPES_ALIAS, APPLICATIONS_ALIAS, format_source_point
from utils.auxiliary import fit, fill
from ..auxiliary import section, show, warn
LOCATION_WIDTH = 99
CALLS_WIDTH = 9
TIME_WIDTH = 11
COLUMNS = (
(-LOCATION_WIDTH, "name", "%*s"),
(CALLS_WIDTH, "calls", "%*d"),
(TIME_WIDTH, "total", "%*.4f"),
(TIME_WIDTH, "cumulative", "%*.4f")
)
SEPARATOR = " "
FILLER = "-"
SORT_BY_NAME = "SORT BY NAME"
SORT_BY_CALLS = "SORT BY CALLS"
SORT_BY_TOTAL = "SORT BY TOTAL"
SORT_BY_CUMULATIVE = "SORT BY CUMULATIVE"
SORT_VALUES = {
"n": SORT_BY_NAME,
"name": SORT_BY_NAME,
"ca": SORT_BY_CALLS,
"calls": SORT_BY_CALLS,
"t": SORT_BY_TOTAL,
"total": SORT_BY_TOTAL,
"cu": SORT_BY_CUMULATIVE,
"cumulative": SORT_BY_CUMULATIVE
}
ORDER_BY_ASCENDING = "ORDER BY ASCENDING"
ORDER_BY_DESCENDING = "ORDER BY DESCENDING"
ORDER_VALUES = {
"a": ORDER_BY_ASCENDING,
"asc": ORDER_BY_ASCENDING,
"ascending": ORDER_BY_ASCENDING,
"d": ORDER_BY_DESCENDING,
"desc": ORDER_BY_DESCENDING,
"descending": ORDER_BY_DESCENDING
}
SORT_MAPPING = {
SORT_BY_NAME: lambda item: item[0],
SORT_BY_CALLS: lambda item: item[1],
SORT_BY_TOTAL: lambda item: item[2],
SORT_BY_CUMULATIVE: lambda item: item[3]
}
BUILD_IN_PATTERN = re.compile("\<built-in method (?P<name>.+)\>")
METHOD_PATTERN = re.compile("\<method '(?P<name>.+)' of '(?P<class>.+)' objects\>")
def make_name(path, line, function):
if path == "~":
match = METHOD_PATTERN.match(function)
if match:
name = "%s.%s" % (match.group("class"), match.group("name"))
else:
match = BUILD_IN_PATTERN.match(function)
if match:
name = "%s" % match.group("name")
else:
name = function[1:-1]
return fit(name, LOCATION_WIDTH)
else:
return format_source_point(path, line, function, width=LOCATION_WIDTH)
def run(name=None, location=None, headers=False, sort=None, order=None, limit=50, nolimit=False, all=False):
"""
show server last profile statistics: name, calls, total and cumulative times
:arg name: specifies profile name
:arg location: input file location with stored profile statistics
:key switch headers: show columns headers
:key sort: sort entries by "name", by "calls", by "total" or by "cumulative"
:key order: sort entries "asc"ending or "desc"ending
:key switch nolimit: disable output entries limit
:key int limit: limit output to specified number of entries
:key switch all: show all entries including from non-server code
"""
if location is None:
location = settings.PROFILE_FILENAME_TEMPLATE % (name or settings.PROFILE_DEFAULT_NAME)
elif name is not None:
warn("name and location are mutually exclusive options")
return
if not managers.file_manager.exists(file_access.FILE, None, location):
warn("no profile")
return
sort = SORT_VALUES.get((sort or "").lower(), SORT_BY_TOTAL)
if sort is SORT_BY_NAME and order is None:
order = "asc"
order = ORDER_VALUES.get((order or "").lower(), ORDER_BY_DESCENDING)
if nolimit:
limit = None
profile = pstats.Stats(location)
statistics = tuple((make_name(path, line, function), calls, total, cumulative)
for (path, line, function), (calls, stack, total, cumulative, more)
in profile.stats.iteritems())
key = SORT_MAPPING[sort]
reverse = order is ORDER_BY_DESCENDING
entries = sorted(statistics, key=key, reverse=reverse)
with section("statistics", width=CONSOLE_WIDTH):
if headers:
show(SEPARATOR.join("%*s" % (width, label) for width, label, template in COLUMNS))
show(SEPARATOR.join(fill(FILLER, abs(width)) for width, label, template in COLUMNS))
index = 0
for entry in entries:
if not (all
or entry[0].startswith(BINARY_ALIAS)
or entry[0].startswith(TYPES_ALIAS)
or entry[0].startswith(APPLICATIONS_ALIAS)
or entry[0].startswith(SERVER_ALIAS)):
continue
show(SEPARATOR.join(template % (width, value) for value, (width, label, template) in izip(entry, COLUMNS)))
if index == limit:
break
index += 1
| [
"[email protected]"
] | |
a9661e7b5c5f4b1de005a245508c6ca122738ecc | effeae00f945e10e5c5a52f28d813c0b8b76d569 | /app/Test/tasks-dev.py | e6d6732d024a33c32d90de0a4b00bc618df38dc0 | [] | no_license | maro99/Celery-tutorials | 3e4833e44dcf9cee69f07303929cbdafd760234c | c554e17d210071168dac06c8798a889048014f9f | refs/heads/master | 2020-03-26T16:15:13.393049 | 2018-08-17T07:55:00 | 2018-08-17T07:55:00 | 145,090,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # Create your tasks here
from __future__ import absolute_import, unicode_literals
import datetime
import time
from celery import shared_task
# from Test.models import Task
@shared_task
def add(x, y):
return x + y
@shared_task
def mul(x, y):
return x * y
@shared_task
def xsum(numbers):
return sum(numbers)
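# Usage sketch (illustrative; assumes a configured Celery app and a running
# worker consuming the default queue):
#
#     add.delay(4, 4).get(timeout=10)        # -> 8, executed by the worker
#     xsum.delay([1, 2, 3]).get(timeout=10)  # -> 6
#
#     from celery import chain
#     chain(add.s(2, 2), mul.s(8))().get()   # (2 + 2) * 8 -> 32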
# @shared_task
# def long_task():
# start = datetime.datetime.now()
# task = Task.objects.create(
# start_at=datetime.datetime.now(),
# end_at=datetime.datetime.now(),
# )
# time.sleep(10)
# end = datetime.datetime.now()
# task.start_at=start
# task.end_at=end
#     task.save()
| [
"[email protected]"
] | |
56b13cb050bed40abe9d9b579c1da59cc8dc2c0d | 4d360320e06339a4f7d2a2723cddf02ff02a306e | /0x10-python-network_0/6-peak.py | 574e06c979ee4c5f8dcc9677cbca278c37e8ad95 | [] | no_license | AmineNeifer/holbertonschool-higher_level_programming | fd6ccdb1b5f0dc85e10750e9f2c7824290697e85 | f5c42bff003b85a7c19702e0233997645fce2fb1 | refs/heads/master | 2020-09-29T02:56:52.286548 | 2020-05-15T00:12:50 | 2020-05-15T00:12:50 | 226,933,206 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | #!/usr/bin/python3
""" funtion find_peak"""
def find_peak(list_of_integers):
""" find a peak in a list of integers
Arguments:
list_of_integers: list of int
Returns:
int or None if list is empty
"""
if list_of_integers == []:
return None
length = len(list_of_integers)
if length == 1:
return list_of_integers[0]
for i in range(length):
current = list_of_integers[i]
        # clamp the neighbours at the edges (avoids an IndexError at the
        # last index and the accidental wrap-around of index -1 at i == 0)
        n = list_of_integers[i + 1] if i < length - 1 else current
        p = list_of_integers[i - 1] if i > 0 else current
if current >= n and current >= p:
return current
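# Examples (illustrative):
#     find_peak([1, 2, 4, 6, 3])     # -> 6
#     find_peak([4, 2, 1, 2, 3, 1])  # -> 4 (any peak is a valid answer)
#     find_peak([])                  # -> None
#
# A lower-complexity sketch (O(log n)): binary-search toward the rising side,
# since a peak always lies in that direction.
#
#     def find_peak_log(lst):
#         if not lst:
#             return None
#         lo, hi = 0, len(lst) - 1
#         while lo < hi:
#             mid = (lo + hi) // 2
#             if lst[mid] < lst[mid + 1]:
#                 lo = mid + 1
#             else:
#                 hi = mid
#         return lst[lo]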
| [
"[email protected]"
] |